Diffstat (limited to 'src/seastar/dpdk/drivers/crypto')
-rw-r--r--  src/seastar/dpdk/drivers/crypto/Makefile  59
-rw-r--r--  src/seastar/dpdk/drivers/crypto/aesni_gcm/Makefile  59
-rw-r--r--  src/seastar/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_ops.h  62
-rw-r--r--  src/seastar/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c  541
-rw-r--r--  src/seastar/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c  363
-rw-r--r--  src/seastar/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h  121
-rw-r--r--  src/seastar/dpdk/drivers/crypto/aesni_gcm/rte_pmd_aesni_gcm_version.map  3
-rw-r--r--  src/seastar/dpdk/drivers/crypto/aesni_mb/Makefile  61
-rw-r--r--  src/seastar/dpdk/drivers/crypto/aesni_mb/aesni_mb_ops.h  236
-rw-r--r--  src/seastar/dpdk/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c  808
-rw-r--r--  src/seastar/dpdk/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c  529
-rw-r--r--  src/seastar/dpdk/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h  240
-rw-r--r--  src/seastar/dpdk/drivers/crypto/aesni_mb/rte_pmd_aesni_version.map  3
-rw-r--r--  src/seastar/dpdk/drivers/crypto/armv8/Makefile  65
-rw-r--r--  src/seastar/dpdk/drivers/crypto/armv8/rte_armv8_pmd.c  906
-rw-r--r--  src/seastar/dpdk/drivers/crypto/armv8/rte_armv8_pmd_ops.c  370
-rw-r--r--  src/seastar/dpdk/drivers/crypto/armv8/rte_armv8_pmd_private.h  211
-rw-r--r--  src/seastar/dpdk/drivers/crypto/armv8/rte_armv8_pmd_version.map  3
-rw-r--r--  src/seastar/dpdk/drivers/crypto/dpaa2_sec/Makefile  78
-rw-r--r--  src/seastar/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c  1656
-rw-r--r--  src/seastar/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h  70
-rw-r--r--  src/seastar/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h  368
-rw-r--r--  src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/compat.h  157
-rw-r--r--  src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/desc.h  2601
-rw-r--r--  src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/desc/algo.h  465
-rw-r--r--  src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/desc/common.h  131
-rw-r--r--  src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/desc/ipsec.h  1547
-rw-r--r--  src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta.h  954
-rw-r--r--  src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/fifo_load_store_cmd.h  346
-rw-r--r--  src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/header_cmd.h  251
-rw-r--r--  src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/jump_cmd.h  207
-rw-r--r--  src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/key_cmd.h  222
-rw-r--r--  src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/load_cmd.h  335
-rw-r--r--  src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/math_cmd.h  402
-rw-r--r--  src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/move_cmd.h  445
-rw-r--r--  src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/nfifo_cmd.h  196
-rw-r--r--  src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/operation_cmd.h  599
-rw-r--r--  src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/protocol_cmd.h  732
-rw-r--r--  src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/sec_run_time_asm.h  823
-rw-r--r--  src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/seq_in_out_ptr_cmd.h  208
-rw-r--r--  src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/signature_cmd.h  75
-rw-r--r--  src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/store_cmd.h  185
-rw-r--r--  src/seastar/dpdk/drivers/crypto/dpaa2_sec/mc/dpseci.c  558
-rw-r--r--  src/seastar/dpdk/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h  746
-rw-r--r--  src/seastar/dpdk/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h  256
-rw-r--r--  src/seastar/dpdk/drivers/crypto/dpaa2_sec/rte_pmd_dpaa2_sec_version.map  4
-rw-r--r--  src/seastar/dpdk/drivers/crypto/kasumi/Makefile  62
-rw-r--r--  src/seastar/dpdk/drivers/crypto/kasumi/rte_kasumi_pmd.c  666
-rw-r--r--  src/seastar/dpdk/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c  345
-rw-r--r--  src/seastar/dpdk/drivers/crypto/kasumi/rte_kasumi_pmd_private.h  106
-rw-r--r--  src/seastar/dpdk/drivers/crypto/kasumi/rte_pmd_kasumi_version.map  3
-rw-r--r--  src/seastar/dpdk/drivers/crypto/null/Makefile  54
-rw-r--r--  src/seastar/dpdk/drivers/crypto/null/null_crypto_pmd.c  270
-rw-r--r--  src/seastar/dpdk/drivers/crypto/null/null_crypto_pmd_ops.c  356
-rw-r--r--  src/seastar/dpdk/drivers/crypto/null/null_crypto_pmd_private.h  93
-rw-r--r--  src/seastar/dpdk/drivers/crypto/null/rte_pmd_null_crypto_version.map  3
-rw-r--r--  src/seastar/dpdk/drivers/crypto/openssl/Makefile  53
-rw-r--r--  src/seastar/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c  1374
-rw-r--r--  src/seastar/dpdk/drivers/crypto/openssl/rte_openssl_pmd_ops.c  729
-rw-r--r--  src/seastar/dpdk/drivers/crypto/openssl/rte_openssl_pmd_private.h  176
-rw-r--r--  src/seastar/dpdk/drivers/crypto/openssl/rte_pmd_openssl_version.map  3
-rw-r--r--  src/seastar/dpdk/drivers/crypto/qat/Makefile  59
-rw-r--r--  src/seastar/dpdk/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h  175
-rw-r--r--  src/seastar/dpdk/drivers/crypto/qat/qat_adf/icp_qat_fw.h  316
-rw-r--r--  src/seastar/dpdk/drivers/crypto/qat/qat_adf/icp_qat_fw_la.h  404
-rw-r--r--  src/seastar/dpdk/drivers/crypto/qat/qat_adf/icp_qat_hw.h  309
-rw-r--r--  src/seastar/dpdk/drivers/crypto/qat/qat_adf/qat_algs.h  174
-rw-r--r--  src/seastar/dpdk/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c  1014
-rw-r--r--  src/seastar/dpdk/drivers/crypto/qat/qat_crypto.c  1293
-rw-r--r--  src/seastar/dpdk/drivers/crypto/qat/qat_crypto.h  141
-rw-r--r--  src/seastar/dpdk/drivers/crypto/qat/qat_crypto_capabilities.h  574
-rw-r--r--  src/seastar/dpdk/drivers/crypto/qat/qat_logs.h  78
-rw-r--r--  src/seastar/dpdk/drivers/crypto/qat/qat_qp.c  490
-rw-r--r--  src/seastar/dpdk/drivers/crypto/qat/rte_pmd_qat_version.map  3
-rw-r--r--  src/seastar/dpdk/drivers/crypto/qat/rte_qat_cryptodev.c  163
-rw-r--r--  src/seastar/dpdk/drivers/crypto/scheduler/Makefile  60
-rw-r--r--  src/seastar/dpdk/drivers/crypto/scheduler/rte_cryptodev_scheduler.c  593
-rw-r--r--  src/seastar/dpdk/drivers/crypto/scheduler/rte_cryptodev_scheduler.h  334
-rw-r--r--  src/seastar/dpdk/drivers/crypto/scheduler/rte_cryptodev_scheduler_operations.h  85
-rw-r--r--  src/seastar/dpdk/drivers/crypto/scheduler/rte_pmd_crypto_scheduler_version.map  23
-rw-r--r--  src/seastar/dpdk/drivers/crypto/scheduler/scheduler_failover.c  287
-rw-r--r--  src/seastar/dpdk/drivers/crypto/scheduler/scheduler_pkt_size_distr.c  464
-rw-r--r--  src/seastar/dpdk/drivers/crypto/scheduler/scheduler_pmd.c  456
-rw-r--r--  src/seastar/dpdk/drivers/crypto/scheduler/scheduler_pmd_ops.c  571
-rw-r--r--  src/seastar/dpdk/drivers/crypto/scheduler/scheduler_pmd_private.h  156
-rw-r--r--  src/seastar/dpdk/drivers/crypto/scheduler/scheduler_roundrobin.c  281
-rw-r--r--  src/seastar/dpdk/drivers/crypto/snow3g/Makefile  62
-rw-r--r--  src/seastar/dpdk/drivers/crypto/snow3g/rte_pmd_snow3g_version.map  3
-rw-r--r--  src/seastar/dpdk/drivers/crypto/snow3g/rte_snow3g_pmd.c  655
-rw-r--r--  src/seastar/dpdk/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c  343
-rw-r--r--  src/seastar/dpdk/drivers/crypto/snow3g/rte_snow3g_pmd_private.h  107
-rw-r--r--  src/seastar/dpdk/drivers/crypto/zuc/Makefile  62
-rw-r--r--  src/seastar/dpdk/drivers/crypto/zuc/rte_pmd_zuc_version.map  3
-rw-r--r--  src/seastar/dpdk/drivers/crypto/zuc/rte_zuc_pmd.c  554
-rw-r--r--  src/seastar/dpdk/drivers/crypto/zuc/rte_zuc_pmd_ops.c  343
-rw-r--r--  src/seastar/dpdk/drivers/crypto/zuc/rte_zuc_pmd_private.h  108
96 files changed, 34293 insertions, 0 deletions
diff --git a/src/seastar/dpdk/drivers/crypto/Makefile b/src/seastar/dpdk/drivers/crypto/Makefile
new file mode 100644
index 00000000..7a719b9d
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/Makefile
@@ -0,0 +1,59 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+core-libs := librte_eal librte_mbuf librte_mempool librte_ring librte_cryptodev
+
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm
+DEPDIRS-aesni_gcm = $(core-libs)
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += aesni_mb
+DEPDIRS-aesni_mb = $(core-libs)
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += armv8
+DEPDIRS-armv8 = $(core-libs)
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += openssl
+DEPDIRS-openssl = $(core-libs)
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat
+DEPDIRS-qat = $(core-libs)
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler
+DEPDIRS-scheduler = $(core-libs) librte_kvargs librte_reorder
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += snow3g
+DEPDIRS-snow3g = $(core-libs)
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += kasumi
+DEPDIRS-kasumi = $(core-libs)
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += zuc
+DEPDIRS-zuc = $(core-libs)
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += null
+DEPDIRS-null = $(core-libs)
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += dpaa2_sec
+DEPDIRS-dpaa2_sec = $(core-libs)
+
+include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/src/seastar/dpdk/drivers/crypto/aesni_gcm/Makefile b/src/seastar/dpdk/drivers/crypto/aesni_gcm/Makefile
new file mode 100644
index 00000000..59a7c6a9
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/aesni_gcm/Makefile
@@ -0,0 +1,59 @@
+# BSD LICENSE
+#
+# Copyright(c) 2016 Intel Corporation. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifneq ($(MAKECMDGOALS),clean)
+endif
+
+# library name
+LIB = librte_pmd_aesni_gcm.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_aesni_gcm_version.map
+
+# external library dependencies
+LDLIBS += -lisal_crypto
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm_pmd_ops.c
+
+# export include files
+SYMLINK-y-include +=
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/seastar/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_ops.h b/src/seastar/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
new file mode 100644
index 00000000..e9de6546
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
@@ -0,0 +1,62 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _AESNI_GCM_OPS_H_
+#define _AESNI_GCM_OPS_H_
+
+#ifndef LINUX
+#define LINUX
+#endif
+
+#include <isa-l_crypto/aes_gcm.h>
+
+typedef void (*aesni_gcm_init_t)(struct gcm_data *my_ctx_data,
+ uint8_t *iv,
+ uint8_t const *aad,
+ uint64_t aad_len);
+
+typedef void (*aesni_gcm_update_t)(struct gcm_data *my_ctx_data,
+ uint8_t *out,
+ const uint8_t *in,
+ uint64_t plaintext_len);
+
+typedef void (*aesni_gcm_finalize_t)(struct gcm_data *my_ctx_data,
+ uint8_t *auth_tag,
+ uint64_t auth_tag_len);
+
+struct aesni_gcm_ops {
+ aesni_gcm_init_t init;
+ aesni_gcm_update_t update;
+ aesni_gcm_finalize_t finalize;
+};
+
+#endif /* _AESNI_GCM_OPS_H_ */
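For reference, a minimal usage sketch of the ops table defined above, not part of the patch: it assumes only the isa-l_crypto aesni_gcm128_* entry points that the PMD itself wires up below, and the helper name gcm128_encrypt_once is hypothetical.

    #include <stdint.h>
    #include "aesni_gcm_ops.h"

    /* Illustrative only: drive one AES-128-GCM encryption of a contiguous
     * buffer through struct aesni_gcm_ops. */
    static const struct aesni_gcm_ops gcm128_enc = {
    	aesni_gcm128_init,
    	aesni_gcm128_enc_update,
    	aesni_gcm128_enc_finalize
    };

    static void
    gcm128_encrypt_once(uint8_t key[16], uint8_t *iv,
    		const uint8_t *aad, uint64_t aad_len,
    		const uint8_t *in, uint8_t *out, uint64_t len,
    		uint8_t tag[16])
    {
    	struct gcm_data gdata;

    	aesni_gcm128_pre(key, &gdata);	/* key expansion / precompute */
    	gcm128_enc.init(&gdata, iv, aad, aad_len);
    	gcm128_enc.update(&gdata, out, in, len);
    	gcm128_enc.finalize(&gdata, tag, 16);
    }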
diff --git a/src/seastar/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/src/seastar/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
new file mode 100644
index 00000000..101ef98b
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
@@ -0,0 +1,541 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_common.h>
+#include <rte_config.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_vdev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+#include <rte_byteorder.h>
+
+#include "aesni_gcm_pmd_private.h"
+
+/** GCM encode functions pointer table */
+static const struct aesni_gcm_ops aesni_gcm_enc[] = {
+ [AESNI_GCM_KEY_128] = {
+ aesni_gcm128_init,
+ aesni_gcm128_enc_update,
+ aesni_gcm128_enc_finalize
+ },
+ [AESNI_GCM_KEY_256] = {
+ aesni_gcm256_init,
+ aesni_gcm256_enc_update,
+ aesni_gcm256_enc_finalize
+ }
+};
+
+/** GCM decode functions pointer table */
+static const struct aesni_gcm_ops aesni_gcm_dec[] = {
+ [AESNI_GCM_KEY_128] = {
+ aesni_gcm128_init,
+ aesni_gcm128_dec_update,
+ aesni_gcm128_dec_finalize
+ },
+ [AESNI_GCM_KEY_256] = {
+ aesni_gcm256_init,
+ aesni_gcm256_dec_update,
+ aesni_gcm256_dec_finalize
+ }
+};
+
+/** Parse crypto xform chain and set private session parameters */
+int
+aesni_gcm_set_session_parameters(struct aesni_gcm_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_sym_xform *auth_xform;
+ const struct rte_crypto_sym_xform *cipher_xform;
+
+ if (xform->next == NULL || xform->next->next != NULL) {
+ GCM_LOG_ERR("Two and only two chained xform required");
+ return -EINVAL;
+ }
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ auth_xform = xform->next;
+ cipher_xform = xform;
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ auth_xform = xform;
+ cipher_xform = xform->next;
+ } else {
+ GCM_LOG_ERR("Cipher and auth xform required");
+ return -EINVAL;
+ }
+
+ if (!(cipher_xform->cipher.algo == RTE_CRYPTO_CIPHER_AES_GCM &&
+ (auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GCM ||
+ auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC))) {
+ GCM_LOG_ERR("We only support AES GCM and AES GMAC");
+ return -EINVAL;
+ }
+
+ /* Select Crypto operation */
+ if (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
+ auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
+ sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
+ else if (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
+ auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
+ sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
+ else {
+ GCM_LOG_ERR("Cipher/Auth operations: Encrypt/Generate or"
+ " Decrypt/Verify are valid only");
+ return -EINVAL;
+ }
+
+ /* Check key length and calculate GCM pre-compute. */
+ switch (cipher_xform->cipher.key.length) {
+ case 16:
+ aesni_gcm128_pre(cipher_xform->cipher.key.data, &sess->gdata);
+ sess->key = AESNI_GCM_KEY_128;
+
+ break;
+ case 32:
+ aesni_gcm256_pre(cipher_xform->cipher.key.data, &sess->gdata);
+ sess->key = AESNI_GCM_KEY_256;
+
+ break;
+ default:
+ GCM_LOG_ERR("Unsupported cipher key length");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/** Get gcm session */
+static struct aesni_gcm_session *
+aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op)
+{
+ struct aesni_gcm_session *sess = NULL;
+
+ if (op->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
+ if (unlikely(op->session->dev_type
+ != RTE_CRYPTODEV_AESNI_GCM_PMD))
+ return sess;
+
+ sess = (struct aesni_gcm_session *)op->session->_private;
+ } else {
+ void *_sess;
+
+ if (rte_mempool_get(qp->sess_mp, &_sess))
+ return sess;
+
+ sess = (struct aesni_gcm_session *)
+ ((struct rte_cryptodev_sym_session *)_sess)->_private;
+
+ if (unlikely(aesni_gcm_set_session_parameters(sess,
+ op->xform) != 0)) {
+ rte_mempool_put(qp->sess_mp, _sess);
+ sess = NULL;
+ }
+ }
+ return sess;
+}
+
+/**
+ * Process a GCM crypto operation: walk the (possibly chained) source mbuf,
+ * run the GCM init/update/finalize steps and produce or verify the digest.
+ *
+ * @param op symmetric crypto operation
+ * @param session GCM session
+ *
+ * @return
+ * - 0 on success, -1 on invalid IV/digest length or missing tag space
+ */
+static int
+process_gcm_crypto_op(struct rte_crypto_sym_op *op,
+ struct aesni_gcm_session *session)
+{
+ uint8_t *src, *dst;
+ struct rte_mbuf *m_src = op->m_src;
+ uint32_t offset = op->cipher.data.offset;
+ uint32_t part_len, total_len, data_len;
+
+ RTE_ASSERT(m_src != NULL);
+
+ while (offset >= m_src->data_len) {
+ offset -= m_src->data_len;
+ m_src = m_src->next;
+
+ RTE_ASSERT(m_src != NULL);
+ }
+
+ data_len = m_src->data_len - offset;
+ part_len = (data_len < op->cipher.data.length) ? data_len :
+ op->cipher.data.length;
+
+ /* A destination buffer is required when the source buffer is segmented */
+ RTE_ASSERT((part_len == op->cipher.data.length) ||
+ ((part_len != op->cipher.data.length) &&
+ (op->m_dst != NULL)));
+ /* Segmented destination buffer is not supported */
+ RTE_ASSERT((op->m_dst == NULL) ||
+ ((op->m_dst != NULL) &&
+ rte_pktmbuf_is_contiguous(op->m_dst)));
+
+
+ dst = op->m_dst ?
+ rte_pktmbuf_mtod_offset(op->m_dst, uint8_t *,
+ op->cipher.data.offset) :
+ rte_pktmbuf_mtod_offset(op->m_src, uint8_t *,
+ op->cipher.data.offset);
+
+ src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);
+
+ /* sanity checks */
+ if (op->cipher.iv.length != 16 && op->cipher.iv.length != 12 &&
+ op->cipher.iv.length != 0) {
+ GCM_LOG_ERR("iv");
+ return -1;
+ }
+
+ /*
+ * For a 12-byte IV, GCM uses a 16-byte pre-counter block whose last
+ * 32-bit word must be set to 1 (big endian); the driver expects the
+ * full 16 bytes to be allocated.
+ */
+ if (op->cipher.iv.length == 12) {
+ uint32_t *iv_padd = (uint32_t *)&op->cipher.iv.data[12];
+ *iv_padd = rte_bswap32(1);
+ }
+
+ if (op->auth.digest.length != 16 &&
+ op->auth.digest.length != 12 &&
+ op->auth.digest.length != 8) {
+ GCM_LOG_ERR("digest");
+ return -1;
+ }
+
+ if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION) {
+
+ aesni_gcm_enc[session->key].init(&session->gdata,
+ op->cipher.iv.data,
+ op->auth.aad.data,
+ (uint64_t)op->auth.aad.length);
+
+ aesni_gcm_enc[session->key].update(&session->gdata, dst, src,
+ (uint64_t)part_len);
+ total_len = op->cipher.data.length - part_len;
+
+ while (total_len) {
+ dst += part_len;
+ m_src = m_src->next;
+
+ RTE_ASSERT(m_src != NULL);
+
+ src = rte_pktmbuf_mtod(m_src, uint8_t *);
+ part_len = (m_src->data_len < total_len) ?
+ m_src->data_len : total_len;
+
+ aesni_gcm_enc[session->key].update(&session->gdata,
+ dst, src,
+ (uint64_t)part_len);
+ total_len -= part_len;
+ }
+
+ aesni_gcm_enc[session->key].finalize(&session->gdata,
+ op->auth.digest.data,
+ (uint64_t)op->auth.digest.length);
+ } else { /* session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION */
+ uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(op->m_dst ?
+ op->m_dst : op->m_src,
+ op->auth.digest.length);
+
+ if (!auth_tag) {
+ GCM_LOG_ERR("auth_tag");
+ return -1;
+ }
+
+ aesni_gcm_dec[session->key].init(&session->gdata,
+ op->cipher.iv.data,
+ op->auth.aad.data,
+ (uint64_t)op->auth.aad.length);
+
+ aesni_gcm_dec[session->key].update(&session->gdata, dst, src,
+ (uint64_t)part_len);
+ total_len = op->cipher.data.length - part_len;
+
+ while (total_len) {
+ dst += part_len;
+ m_src = m_src->next;
+
+ RTE_ASSERT(m_src != NULL);
+
+ src = rte_pktmbuf_mtod(m_src, uint8_t *);
+ part_len = (m_src->data_len < total_len) ?
+ m_src->data_len : total_len;
+
+ aesni_gcm_dec[session->key].update(&session->gdata,
+ dst, src,
+ (uint64_t)part_len);
+ total_len -= part_len;
+ }
+
+ aesni_gcm_dec[session->key].finalize(&session->gdata,
+ auth_tag,
+ (uint64_t)op->auth.digest.length);
+ }
+
+ return 0;
+}
+
+/**
+ * Post-process a completed GCM crypto operation: set the operation status
+ * and, for authenticated decryption, compare the computed digest against
+ * the supplied one and trim the scratch digest area from the mbuf.
+ *
+ * @param op completed crypto operation
+ */
+static void
+post_process_gcm_crypto_op(struct rte_crypto_op *op)
+{
+ struct rte_mbuf *m = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
+
+ struct aesni_gcm_session *session =
+ (struct aesni_gcm_session *)op->sym->session->_private;
+
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+ /* Verify digest if required */
+ if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
+
+ uint8_t *tag = rte_pktmbuf_mtod_offset(m, uint8_t *,
+ m->data_len - op->sym->auth.digest.length);
+
+#ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
+ rte_hexdump(stdout, "auth tag (orig):",
+ op->sym->auth.digest.data, op->sym->auth.digest.length);
+ rte_hexdump(stdout, "auth tag (calc):",
+ tag, op->sym->auth.digest.length);
+#endif
+
+ if (memcmp(tag, op->sym->auth.digest.data,
+ op->sym->auth.digest.length) != 0)
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+
+ /* trim area used for digest from mbuf */
+ rte_pktmbuf_trim(m, op->sym->auth.digest.length);
+ }
+}
+
+/**
+ * Handle a completed GCM request: post-process it and, for session-less
+ * operations, return the session to the queue pair's mempool.
+ *
+ * @param qp queue pair the operation was processed on
+ * @param op completed crypto operation
+ */
+static void
+handle_completed_gcm_crypto_op(struct aesni_gcm_qp *qp,
+ struct rte_crypto_op *op)
+{
+ post_process_gcm_crypto_op(op);
+
+ /* Free session if a session-less crypto op */
+ if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+ rte_mempool_put(qp->sess_mp, op->sym->session);
+ op->sym->session = NULL;
+ }
+}
+
+static uint16_t
+aesni_gcm_pmd_dequeue_burst(void *queue_pair,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct aesni_gcm_session *sess;
+ struct aesni_gcm_qp *qp = queue_pair;
+
+ int retval = 0;
+ unsigned int i, nb_dequeued;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
+ (void **)ops, nb_ops, NULL);
+
+ for (i = 0; i < nb_dequeued; i++) {
+
+ sess = aesni_gcm_get_session(qp, ops[i]->sym);
+ if (unlikely(sess == NULL)) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ qp->qp_stats.dequeue_err_count++;
+ break;
+ }
+
+ retval = process_gcm_crypto_op(ops[i]->sym, sess);
+ if (retval < 0) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ qp->qp_stats.dequeue_err_count++;
+ break;
+ }
+
+ handle_completed_gcm_crypto_op(qp, ops[i]);
+ }
+
+ qp->qp_stats.dequeued_count += i;
+
+ return i;
+}
+
+static uint16_t
+aesni_gcm_pmd_enqueue_burst(void *queue_pair,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct aesni_gcm_qp *qp = queue_pair;
+
+ unsigned int nb_enqueued;
+
+ nb_enqueued = rte_ring_enqueue_burst(qp->processed_pkts,
+ (void **)ops, nb_ops, NULL);
+ qp->qp_stats.enqueued_count += nb_enqueued;
+
+ return nb_enqueued;
+}
+
+static int aesni_gcm_remove(struct rte_vdev_device *vdev);
+
+static int
+aesni_gcm_create(const char *name,
+ struct rte_vdev_device *vdev,
+ struct rte_crypto_vdev_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ struct aesni_gcm_private *internals;
+
+ if (init_params->name[0] == '\0')
+ snprintf(init_params->name, sizeof(init_params->name),
+ "%s", name);
+
+ /* Check that the CPU supports the AES instruction set */
+ if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
+ GCM_LOG_ERR("AES instructions not supported by CPU");
+ return -EFAULT;
+ }
+
+ dev = rte_cryptodev_pmd_virtual_dev_init(init_params->name,
+ sizeof(struct aesni_gcm_private), init_params->socket_id);
+ if (dev == NULL) {
+ GCM_LOG_ERR("failed to create cryptodev vdev");
+ goto init_error;
+ }
+
+ dev->dev_type = RTE_CRYPTODEV_AESNI_GCM_PMD;
+ dev->dev_ops = rte_aesni_gcm_pmd_ops;
+
+ /* register rx/tx burst functions for data path */
+ dev->dequeue_burst = aesni_gcm_pmd_dequeue_burst;
+ dev->enqueue_burst = aesni_gcm_pmd_enqueue_burst;
+
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_CPU_AESNI |
+ RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
+
+ internals = dev->data->dev_private;
+
+ internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
+ internals->max_nb_sessions = init_params->max_nb_sessions;
+
+ return 0;
+
+init_error:
+ GCM_LOG_ERR("driver %s: create failed", init_params->name);
+
+ aesni_gcm_remove(vdev);
+ return -EFAULT;
+}
+
+static int
+aesni_gcm_probe(struct rte_vdev_device *vdev)
+{
+ struct rte_crypto_vdev_init_params init_params = {
+ RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
+ RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+ rte_socket_id(),
+ {0}
+ };
+ const char *name;
+ const char *input_args;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+ input_args = rte_vdev_device_args(vdev);
+ rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
+
+ RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
+ init_params.socket_id);
+ if (init_params.name[0] != '\0')
+ RTE_LOG(INFO, PMD, " User defined name = %s\n",
+ init_params.name);
+ RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
+ init_params.max_nb_queue_pairs);
+ RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
+ init_params.max_nb_sessions);
+
+ return aesni_gcm_create(name, vdev, &init_params);
+}
+
+static int
+aesni_gcm_remove(struct rte_vdev_device *vdev)
+{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ GCM_LOG_INFO("Closing AESNI crypto device %s on numa socket %u\n",
+ name, rte_socket_id());
+
+ return 0;
+}
+
+static struct rte_vdev_driver aesni_gcm_pmd_drv = {
+ .probe = aesni_gcm_probe,
+ .remove = aesni_gcm_remove
+};
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_GCM_PMD, aesni_gcm_pmd_drv);
+RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_GCM_PMD, cryptodev_aesni_gcm_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_GCM_PMD,
+ "max_nb_queue_pairs=<int> "
+ "max_nb_sessions=<int> "
+ "socket_id=<int>");
diff --git a/src/seastar/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c b/src/seastar/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
new file mode 100644
index 00000000..1fc047be
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
@@ -0,0 +1,363 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "aesni_gcm_pmd_private.h"
+
+static const struct rte_cryptodev_capabilities aesni_gcm_pmd_capabilities[] = {
+ { /* AES GMAC (AUTH) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_AES_GMAC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 16
+ },
+ .digest_size = {
+ .min = 8,
+ .max = 16,
+ .increment = 4
+ },
+ .aad_size = {
+ .min = 0,
+ .max = 65535,
+ .increment = 1
+ }
+ }, }
+ }, }
+ },
+ { /* AES GCM (AUTH) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_AES_GCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 16
+ },
+ .digest_size = {
+ .min = 8,
+ .max = 16,
+ .increment = 4
+ },
+ .aad_size = {
+ .min = 0,
+ .max = 65535,
+ .increment = 1
+ }
+ }, }
+ }, }
+ },
+ { /* AES GCM (CIPHER) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_GCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 16
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+/** Configure device */
+static int
+aesni_gcm_pmd_config(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused struct rte_cryptodev_config *config)
+{
+ return 0;
+}
+
+/** Start device */
+static int
+aesni_gcm_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+aesni_gcm_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+static int
+aesni_gcm_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+
+/** Get device statistics */
+static void
+aesni_gcm_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct aesni_gcm_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->qp_stats.enqueued_count;
+ stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+aesni_gcm_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct aesni_gcm_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ }
+}
+
+
+/** Get device info */
+static void
+aesni_gcm_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct aesni_gcm_private *internals = dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->dev_type = dev->dev_type;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = aesni_gcm_pmd_capabilities;
+
+ dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
+ dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+ }
+}
+
+/** Release queue pair */
+static int
+aesni_gcm_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ if (dev->data->queue_pairs[qp_id] != NULL) {
+ rte_free(dev->data->queue_pairs[qp_id]);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+ return 0;
+}
+
+/** Set a unique name for the queue pair based on its dev_id and qp_id */
+static int
+aesni_gcm_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+ struct aesni_gcm_qp *qp)
+{
+ unsigned n = snprintf(qp->name, sizeof(qp->name),
+ "aesni_gcm_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+ if (n > sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+/** Create a ring to place processed packets on */
+static struct rte_ring *
+aesni_gcm_pmd_qp_create_processed_pkts_ring(struct aesni_gcm_qp *qp,
+ unsigned ring_size, int socket_id)
+{
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(qp->name);
+ if (r) {
+ if (rte_ring_get_size(r) >= ring_size) {
+ GCM_LOG_INFO("Reusing existing ring %s for processed"
+ " packets", qp->name);
+ return r;
+ }
+
+ GCM_LOG_ERR("Unable to reuse existing ring %s for processed"
+ " packets", qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+/** Setup a queue pair */
+static int
+aesni_gcm_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id)
+{
+ struct aesni_gcm_qp *qp = NULL;
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ aesni_gcm_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("AES-NI PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL)
+ return (-ENOMEM);
+
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ if (aesni_gcm_pmd_qp_set_unique_name(dev, qp))
+ goto qp_setup_cleanup;
+
+ qp->processed_pkts = aesni_gcm_pmd_qp_create_processed_pkts_ring(qp,
+ qp_conf->nb_descriptors, socket_id);
+ if (qp->processed_pkts == NULL)
+ goto qp_setup_cleanup;
+
+ qp->sess_mp = dev->data->session_pool;
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+
+ return 0;
+
+qp_setup_cleanup:
+ if (qp)
+ rte_free(qp);
+
+ return -1;
+}
+
+/** Start queue pair */
+static int
+aesni_gcm_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ return -ENOTSUP;
+}
+
+/** Stop queue pair */
+static int
+aesni_gcm_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ return -ENOTSUP;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+aesni_gcm_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the aesni gcm session structure */
+static unsigned
+aesni_gcm_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct aesni_gcm_session);
+}
+
+/** Configure an aesni gcm session from a crypto xform chain */
+static void *
+aesni_gcm_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_sym_xform *xform, void *sess)
+{
+ if (unlikely(sess == NULL)) {
+ GCM_LOG_ERR("invalid session struct");
+ return NULL;
+ }
+
+ if (aesni_gcm_set_session_parameters(sess, xform) != 0) {
+ GCM_LOG_ERR("failed configure session parameters");
+ return NULL;
+ }
+
+ return sess;
+}
+
+/** Clear the session memory so it doesn't leave key material behind */
+static void
+aesni_gcm_pmd_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
+{
+ if (sess)
+ memset(sess, 0, sizeof(struct aesni_gcm_session));
+}
+
+struct rte_cryptodev_ops aesni_gcm_pmd_ops = {
+ .dev_configure = aesni_gcm_pmd_config,
+ .dev_start = aesni_gcm_pmd_start,
+ .dev_stop = aesni_gcm_pmd_stop,
+ .dev_close = aesni_gcm_pmd_close,
+
+ .stats_get = aesni_gcm_pmd_stats_get,
+ .stats_reset = aesni_gcm_pmd_stats_reset,
+
+ .dev_infos_get = aesni_gcm_pmd_info_get,
+
+ .queue_pair_setup = aesni_gcm_pmd_qp_setup,
+ .queue_pair_release = aesni_gcm_pmd_qp_release,
+ .queue_pair_start = aesni_gcm_pmd_qp_start,
+ .queue_pair_stop = aesni_gcm_pmd_qp_stop,
+ .queue_pair_count = aesni_gcm_pmd_qp_count,
+
+ .session_get_size = aesni_gcm_pmd_session_get_size,
+ .session_configure = aesni_gcm_pmd_session_configure,
+ .session_clear = aesni_gcm_pmd_session_clear
+};
+
+struct rte_cryptodev_ops *rte_aesni_gcm_pmd_ops = &aesni_gcm_pmd_ops;
diff --git a/src/seastar/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h b/src/seastar/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
new file mode 100644
index 00000000..0496b447
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
@@ -0,0 +1,121 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_AESNI_GCM_PMD_PRIVATE_H_
+#define _RTE_AESNI_GCM_PMD_PRIVATE_H_
+
+#include "aesni_gcm_ops.h"
+
+#define GCM_LOG_ERR(fmt, args...) \
+ RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD), \
+ __func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_AESNI_MB_DEBUG
+#define GCM_LOG_INFO(fmt, args...) \
+ RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD), \
+ __func__, __LINE__, ## args)
+
+#define GCM_LOG_DBG(fmt, args...) \
+ RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD), \
+ __func__, __LINE__, ## args)
+#else
+#define GCM_LOG_INFO(fmt, args...)
+#define GCM_LOG_DBG(fmt, args...)
+#endif
+
+
+/** private data structure for each virtual AESNI GCM device */
+struct aesni_gcm_private {
+ unsigned max_nb_queue_pairs;
+ /**< Max number of queue pairs supported by device */
+ unsigned max_nb_sessions;
+ /**< Max number of sessions supported by device */
+};
+
+struct aesni_gcm_qp {
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_CRYPTODEV_NAME_LEN];
+ /**< Unique Queue Pair Name */
+ struct rte_ring *processed_pkts;
+ /**< Ring for placing processed packets */
+ struct rte_mempool *sess_mp;
+ /**< Session Mempool */
+ struct rte_cryptodev_stats qp_stats;
+ /**< Queue pair statistics */
+} __rte_cache_aligned;
+
+
+enum aesni_gcm_operation {
+ AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION,
+ AESNI_GCM_OP_AUTHENTICATED_DECRYPTION
+};
+
+enum aesni_gcm_key {
+ AESNI_GCM_KEY_128,
+ AESNI_GCM_KEY_256
+};
+
+/** AESNI GCM private session structure */
+struct aesni_gcm_session {
+ enum aesni_gcm_operation op;
+ /**< GCM operation type */
+ enum aesni_gcm_key key;
+ /**< GCM key type */
+ struct gcm_data gdata __rte_cache_aligned;
+ /**< GCM parameters */
+};
+
+
+/**
+ * Setup GCM session parameters
+ * @param sess aesni gcm session structure
+ * @param xform crypto transform chain
+ *
+ * @return
+ * - On success returns 0
+ * - On failure returns error code < 0
+ */
+extern int
+aesni_gcm_set_session_parameters(struct aesni_gcm_session *sess,
+ const struct rte_crypto_sym_xform *xform);
+
+
+/**
+ * Device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_aesni_gcm_pmd_ops;
+
+
+#endif /* _RTE_AESNI_GCM_PMD_PRIVATE_H_ */
diff --git a/src/seastar/dpdk/drivers/crypto/aesni_gcm/rte_pmd_aesni_gcm_version.map b/src/seastar/dpdk/drivers/crypto/aesni_gcm/rte_pmd_aesni_gcm_version.map
new file mode 100644
index 00000000..dc4d417b
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/aesni_gcm/rte_pmd_aesni_gcm_version.map
@@ -0,0 +1,3 @@
+DPDK_16.04 {
+ local: *;
+};
diff --git a/src/seastar/dpdk/drivers/crypto/aesni_mb/Makefile b/src/seastar/dpdk/drivers/crypto/aesni_mb/Makefile
new file mode 100644
index 00000000..611d4123
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/aesni_mb/Makefile
@@ -0,0 +1,61 @@
+# BSD LICENSE
+#
+# Copyright(c) 2015 Intel Corporation. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifneq ($(MAKECMDGOALS),clean)
+ifeq ($(AESNI_MULTI_BUFFER_LIB_PATH),)
+$(error "Please define AESNI_MULTI_BUFFER_LIB_PATH environment variable")
+endif
+endif
+
+# library name
+LIB = librte_pmd_aesni_mb.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_aesni_version.map
+
+# external library dependencies
+CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)
+CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)/include
+LDLIBS += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd_ops.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
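Note that, unlike the aesni_gcm Makefile above, this one fails the build unless AESNI_MULTI_BUFFER_LIB_PATH points at a local Intel IPSec_MB (multi-buffer) library checkout; as an illustration, exporting something like AESNI_MULTI_BUFFER_LIB_PATH=/opt/ipsec_mb (a hypothetical path) satisfies the check and provides the -lIPSec_MB link dependency.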
diff --git a/src/seastar/dpdk/drivers/crypto/aesni_mb/aesni_mb_ops.h b/src/seastar/dpdk/drivers/crypto/aesni_mb/aesni_mb_ops.h
new file mode 100644
index 00000000..59c3ee1e
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/aesni_mb/aesni_mb_ops.h
@@ -0,0 +1,236 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _AESNI_MB_OPS_H_
+#define _AESNI_MB_OPS_H_
+
+#ifndef LINUX
+#define LINUX
+#endif
+
+#include <mb_mgr.h>
+#include <aux_funcs.h>
+
+enum aesni_mb_vector_mode {
+ RTE_AESNI_MB_NOT_SUPPORTED = 0,
+ RTE_AESNI_MB_SSE,
+ RTE_AESNI_MB_AVX,
+ RTE_AESNI_MB_AVX2,
+ RTE_AESNI_MB_AVX512
+};
+
+typedef void (*md5_one_block_t)(const void *data, void *digest);
+
+typedef void (*sha1_one_block_t)(const void *data, void *digest);
+typedef void (*sha224_one_block_t)(const void *data, void *digest);
+typedef void (*sha256_one_block_t)(const void *data, void *digest);
+typedef void (*sha384_one_block_t)(const void *data, void *digest);
+typedef void (*sha512_one_block_t)(const void *data, void *digest);
+
+typedef void (*aes_keyexp_128_t)
+ (const void *key, void *enc_exp_keys, void *dec_exp_keys);
+typedef void (*aes_keyexp_192_t)
+ (const void *key, void *enc_exp_keys, void *dec_exp_keys);
+typedef void (*aes_keyexp_256_t)
+ (const void *key, void *enc_exp_keys, void *dec_exp_keys);
+
+typedef void (*aes_xcbc_expand_key_t)
+ (const void *key, void *exp_k1, void *k2, void *k3);
+
+/** Multi-buffer library function pointer table */
+struct aesni_mb_op_fns {
+ struct {
+ init_mb_mgr_t init_mgr;
+ /**< Initialise scheduler */
+ get_next_job_t get_next;
+ /**< Get next free job structure */
+ submit_job_t submit;
+ /**< Submit job to scheduler */
+ get_completed_job_t get_completed_job;
+ /**< Get completed job */
+ flush_job_t flush_job;
+ /**< flush jobs from manager */
+ } job;
+ /**< multi buffer manager functions */
+
+ struct {
+ struct {
+ md5_one_block_t md5;
+ /**< MD5 one block hash */
+ sha1_one_block_t sha1;
+ /**< SHA1 one block hash */
+ sha224_one_block_t sha224;
+ /**< SHA224 one block hash */
+ sha256_one_block_t sha256;
+ /**< SHA256 one block hash */
+ sha384_one_block_t sha384;
+ /**< SHA384 one block hash */
+ sha512_one_block_t sha512;
+ /**< SHA512 one block hash */
+ } one_block;
+ /**< one block hash functions */
+
+ struct {
+ aes_keyexp_128_t aes128;
+ /**< AES128 key expansions */
+ aes_keyexp_192_t aes192;
+ /**< AES192 key expansions */
+ aes_keyexp_256_t aes256;
+ /**< AES256 key expansions */
+
+ aes_xcbc_expand_key_t aes_xcbc;
+ /**< AES XCBC key expansions */
+ } keyexp;
+ /**< Key expansion functions */
+ } aux;
+ /**< Auxiliary functions */
+};
+
+
+static const struct aesni_mb_op_fns job_ops[] = {
+ [RTE_AESNI_MB_NOT_SUPPORTED] = {
+ .job = {
+ NULL
+ },
+ .aux = {
+ .one_block = {
+ NULL
+ },
+ .keyexp = {
+ NULL
+ }
+ }
+ },
+ [RTE_AESNI_MB_SSE] = {
+ .job = {
+ init_mb_mgr_sse,
+ get_next_job_sse,
+ submit_job_sse,
+ get_completed_job_sse,
+ flush_job_sse
+ },
+ .aux = {
+ .one_block = {
+ md5_one_block_sse,
+ sha1_one_block_sse,
+ sha224_one_block_sse,
+ sha256_one_block_sse,
+ sha384_one_block_sse,
+ sha512_one_block_sse
+ },
+ .keyexp = {
+ aes_keyexp_128_sse,
+ aes_keyexp_192_sse,
+ aes_keyexp_256_sse,
+ aes_xcbc_expand_key_sse
+ }
+ }
+ },
+ [RTE_AESNI_MB_AVX] = {
+ .job = {
+ init_mb_mgr_avx,
+ get_next_job_avx,
+ submit_job_avx,
+ get_completed_job_avx,
+ flush_job_avx
+ },
+ .aux = {
+ .one_block = {
+ md5_one_block_avx,
+ sha1_one_block_avx,
+ sha224_one_block_avx,
+ sha256_one_block_avx,
+ sha384_one_block_avx,
+ sha512_one_block_avx
+ },
+ .keyexp = {
+ aes_keyexp_128_avx,
+ aes_keyexp_192_avx,
+ aes_keyexp_256_avx,
+ aes_xcbc_expand_key_avx
+ }
+ }
+ },
+ [RTE_AESNI_MB_AVX2] = {
+ .job = {
+ init_mb_mgr_avx2,
+ get_next_job_avx2,
+ submit_job_avx2,
+ get_completed_job_avx2,
+ flush_job_avx2
+ },
+ .aux = {
+ .one_block = {
+ md5_one_block_avx2,
+ sha1_one_block_avx2,
+ sha224_one_block_avx2,
+ sha256_one_block_avx2,
+ sha384_one_block_avx2,
+ sha512_one_block_avx2
+ },
+ .keyexp = {
+ aes_keyexp_128_avx2,
+ aes_keyexp_192_avx2,
+ aes_keyexp_256_avx2,
+ aes_xcbc_expand_key_avx2
+ }
+ }
+ },
+ [RTE_AESNI_MB_AVX512] = {
+ .job = {
+ init_mb_mgr_avx512,
+ get_next_job_avx512,
+ submit_job_avx512,
+ get_completed_job_avx512,
+ flush_job_avx512
+ },
+ .aux = {
+ .one_block = {
+ md5_one_block_avx512,
+ sha1_one_block_avx512,
+ sha224_one_block_avx512,
+ sha256_one_block_avx512,
+ sha384_one_block_avx512,
+ sha512_one_block_avx512
+ },
+ .keyexp = {
+ aes_keyexp_128_avx512,
+ aes_keyexp_192_avx512,
+ aes_keyexp_256_avx512,
+ aes_xcbc_expand_key_avx512
+ }
+ }
+ }
+};
+
+
+#endif /* _AESNI_MB_OPS_H_ */
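As context for how this dispatch table is consumed, below is a minimal sketch, not part of the patch, of the vector-mode selection the multi-buffer PMD performs at device creation; it assumes DPDK's rte_cpuflags.h exposes RTE_CPUFLAG_AVX512F, RTE_CPUFLAG_AVX2 and RTE_CPUFLAG_AVX, and the helper name aesni_mb_detect_vector_mode is hypothetical.

    #include <rte_cpuflags.h>
    #include "aesni_mb_ops.h"

    /* Illustrative only: pick the widest instruction-set entry of job_ops
     * that the running CPU supports, falling back to the SSE code path. */
    static enum aesni_mb_vector_mode
    aesni_mb_detect_vector_mode(void)
    {
    	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
    		return RTE_AESNI_MB_AVX512;
    	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
    		return RTE_AESNI_MB_AVX2;
    	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
    		return RTE_AESNI_MB_AVX;
    	return RTE_AESNI_MB_SSE;
    }

    /* The selected entry is then used, e.g.
     * job_ops[aesni_mb_detect_vector_mode()].job.init_mgr(&mb_mgr); */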
diff --git a/src/seastar/dpdk/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/src/seastar/dpdk/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
new file mode 100644
index 00000000..45b25c9d
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -0,0 +1,808 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_vdev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+
+#include "rte_aesni_mb_pmd_private.h"
+
+typedef void (*hash_one_block_t)(const void *data, void *digest);
+typedef void (*aes_keyexp_t)(const void *key, void *enc_exp_keys, void *dec_exp_keys);
+
+/**
+ * Calculate the authentication pre-computes
+ *
+ * @param one_block_hash Function pointer to calculate digest on ipad/opad
+ * @param ipad Inner pad output byte array
+ * @param opad Outer pad output byte array
+ * @param hkey Authentication key
+ * @param hkey_len Authentication key length
+ * @param blocksize Block size of selected hash algo
+ */
+static void
+calculate_auth_precomputes(hash_one_block_t one_block_hash,
+ uint8_t *ipad, uint8_t *opad,
+ uint8_t *hkey, uint16_t hkey_len,
+ uint16_t blocksize)
+{
+ unsigned i, length;
+
+ uint8_t ipad_buf[blocksize] __rte_aligned(16);
+ uint8_t opad_buf[blocksize] __rte_aligned(16);
+
+ /* Setup inner and outer pads */
+ memset(ipad_buf, HMAC_IPAD_VALUE, blocksize);
+ memset(opad_buf, HMAC_OPAD_VALUE, blocksize);
+
+ /* XOR hash key with inner and outer pads */
+ length = hkey_len > blocksize ? blocksize : hkey_len;
+
+ for (i = 0; i < length; i++) {
+ ipad_buf[i] ^= hkey[i];
+ opad_buf[i] ^= hkey[i];
+ }
+
+ /* Compute partial hashes */
+ (*one_block_hash)(ipad_buf, ipad);
+ (*one_block_hash)(opad_buf, opad);
+
+ /* Clean up stack */
+ memset(ipad_buf, 0, blocksize);
+ memset(opad_buf, 0, blocksize);
+}
+
+/** Get xform chain order */
+static enum aesni_mb_operation
+aesni_mb_get_chain_order(const struct rte_crypto_sym_xform *xform)
+{
+ if (xform == NULL)
+ return AESNI_MB_OP_NOT_SUPPORTED;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ if (xform->next == NULL)
+ return AESNI_MB_OP_CIPHER_ONLY;
+ if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return AESNI_MB_OP_CIPHER_HASH;
+ }
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (xform->next == NULL)
+ return AESNI_MB_OP_HASH_ONLY;
+ if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return AESNI_MB_OP_HASH_CIPHER;
+ }
+
+ return AESNI_MB_OP_NOT_SUPPORTED;
+}
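+
+/*
+ * Illustrative example (assumption, not part of the driver): an application
+ * typically builds the xform chain that the helper above classifies as
+ * AESNI_MB_OP_CIPHER_HASH roughly as follows:
+ *
+ *	struct rte_crypto_sym_xform auth_xform = {
+ *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ *		.next = NULL,
+ *	};
+ *	struct rte_crypto_sym_xform cipher_xform = {
+ *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ *		.next = &auth_xform,
+ *	};
+ *
+ * Passing &cipher_xform to aesni_mb_get_chain_order() then yields
+ * AESNI_MB_OP_CIPHER_HASH.
+ */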
+
+/** Set session authentication parameters */
+static int
+aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops,
+ struct aesni_mb_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ hash_one_block_t hash_oneblock_fn;
+
+ if (xform == NULL) {
+ sess->auth.algo = NULL_HASH;
+ return 0;
+ }
+
+ if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
+ MB_LOG_ERR("Crypto xform struct not of type auth");
+ return -1;
+ }
+
+ /* Select auth generate/verify */
+ sess->auth.operation = xform->auth.op;
+
+ /* Set Authentication Parameters */
+ if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_XCBC_MAC) {
+ sess->auth.algo = AES_XCBC;
+ (*mb_ops->aux.keyexp.aes_xcbc)(xform->auth.key.data,
+ sess->auth.xcbc.k1_expanded,
+ sess->auth.xcbc.k2, sess->auth.xcbc.k3);
+ return 0;
+ }
+
+ switch (xform->auth.algo) {
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ sess->auth.algo = MD5;
+ hash_oneblock_fn = mb_ops->aux.one_block.md5;
+ break;
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ sess->auth.algo = SHA1;
+ hash_oneblock_fn = mb_ops->aux.one_block.sha1;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ sess->auth.algo = SHA_224;
+ hash_oneblock_fn = mb_ops->aux.one_block.sha224;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ sess->auth.algo = SHA_256;
+ hash_oneblock_fn = mb_ops->aux.one_block.sha256;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ sess->auth.algo = SHA_384;
+ hash_oneblock_fn = mb_ops->aux.one_block.sha384;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ sess->auth.algo = SHA_512;
+ hash_oneblock_fn = mb_ops->aux.one_block.sha512;
+ break;
+ default:
+ MB_LOG_ERR("Unsupported authentication algorithm selection");
+ return -1;
+ }
+
+ /* Calculate Authentication precomputes */
+ calculate_auth_precomputes(hash_oneblock_fn,
+ sess->auth.pads.inner, sess->auth.pads.outer,
+ xform->auth.key.data,
+ xform->auth.key.length,
+ get_auth_algo_blocksize(sess->auth.algo));
+
+ return 0;
+}
+
+/** Set session cipher parameters */
+static int
+aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
+ struct aesni_mb_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ aes_keyexp_t aes_keyexp_fn;
+
+ if (xform == NULL) {
+ sess->cipher.mode = NULL_CIPHER;
+ return 0;
+ }
+
+ if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ MB_LOG_ERR("Crypto xform struct not of type cipher");
+ return -1;
+ }
+
+ /* Select cipher direction */
+ switch (xform->cipher.op) {
+ case RTE_CRYPTO_CIPHER_OP_ENCRYPT:
+ sess->cipher.direction = ENCRYPT;
+ break;
+ case RTE_CRYPTO_CIPHER_OP_DECRYPT:
+ sess->cipher.direction = DECRYPT;
+ break;
+ default:
+ MB_LOG_ERR("Unsupported cipher operation parameter");
+ return -1;
+ }
+
+ /* Select cipher mode */
+ switch (xform->cipher.algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ sess->cipher.mode = CBC;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ sess->cipher.mode = CNTR;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
+ sess->cipher.mode = DOCSIS_SEC_BPI;
+ break;
+ default:
+ MB_LOG_ERR("Unsupported cipher mode parameter");
+ return -1;
+ }
+
+ /* Check key length and choose key expansion function */
+ switch (xform->cipher.key.length) {
+ case AES_128_BYTES:
+ sess->cipher.key_length_in_bytes = AES_128_BYTES;
+ aes_keyexp_fn = mb_ops->aux.keyexp.aes128;
+ break;
+ case AES_192_BYTES:
+ sess->cipher.key_length_in_bytes = AES_192_BYTES;
+ aes_keyexp_fn = mb_ops->aux.keyexp.aes192;
+ break;
+ case AES_256_BYTES:
+ sess->cipher.key_length_in_bytes = AES_256_BYTES;
+ aes_keyexp_fn = mb_ops->aux.keyexp.aes256;
+ break;
+ default:
+ MB_LOG_ERR("Unsupported cipher key length");
+ return -1;
+ }
+
+ /* Expanded cipher keys */
+ (*aes_keyexp_fn)(xform->cipher.key.data,
+ sess->cipher.expanded_aes_keys.encode,
+ sess->cipher.expanded_aes_keys.decode);
+
+ return 0;
+}
+
+/** Parse crypto xform chain and set private session parameters */
+int
+aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
+ struct aesni_mb_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
+
+ /* Select Crypto operation - hash then cipher / cipher then hash */
+ switch (aesni_mb_get_chain_order(xform)) {
+ case AESNI_MB_OP_HASH_CIPHER:
+ sess->chain_order = HASH_CIPHER;
+ auth_xform = xform;
+ cipher_xform = xform->next;
+ break;
+ case AESNI_MB_OP_CIPHER_HASH:
+ sess->chain_order = CIPHER_HASH;
+ auth_xform = xform->next;
+ cipher_xform = xform;
+ break;
+ case AESNI_MB_OP_HASH_ONLY:
+ sess->chain_order = HASH_CIPHER;
+ auth_xform = xform;
+ cipher_xform = NULL;
+ break;
+ case AESNI_MB_OP_CIPHER_ONLY:
+ /*
+		 * The multi-buffer library operates in only two modes,
+		 * CIPHER_HASH and HASH_CIPHER. When doing cipher only,
+		 * the chain order depends on the cipher operation: encryption
+		 * is always the first operation and decryption the last one.
+ */
+ if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ sess->chain_order = CIPHER_HASH;
+ else
+ sess->chain_order = HASH_CIPHER;
+ auth_xform = NULL;
+ cipher_xform = xform;
+ break;
+ case AESNI_MB_OP_NOT_SUPPORTED:
+ default:
+ MB_LOG_ERR("Unsupported operation chain order parameter");
+ return -1;
+ }
+
+ if (aesni_mb_set_session_auth_parameters(mb_ops, sess, auth_xform)) {
+ MB_LOG_ERR("Invalid/unsupported authentication parameters");
+ return -1;
+ }
+
+ if (aesni_mb_set_session_cipher_parameters(mb_ops, sess,
+ cipher_xform)) {
+ MB_LOG_ERR("Invalid/unsupported cipher parameters");
+ return -1;
+ }
+ return 0;
+}
+
+/**
+ * Burst enqueue: place crypto operations on the ingress queue for processing.
+ *
+ * @param __qp Queue Pair to process
+ * @param ops Crypto operations for processing
+ * @param nb_ops Number of crypto operations for processing
+ *
+ * @return
+ * - Number of crypto operations enqueued
+ */
+static uint16_t
+aesni_mb_pmd_enqueue_burst(void *__qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct aesni_mb_qp *qp = __qp;
+
+ unsigned int nb_enqueued;
+
+ nb_enqueued = rte_ring_enqueue_burst(qp->ingress_queue,
+ (void **)ops, nb_ops, NULL);
+
+ qp->stats.enqueued_count += nb_enqueued;
+
+ return nb_enqueued;
+}
+
+/** Get multi buffer session */
+static inline struct aesni_mb_session *
+get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
+{
+ struct aesni_mb_session *sess = NULL;
+
+ if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
+ if (unlikely(op->sym->session->dev_type !=
+ RTE_CRYPTODEV_AESNI_MB_PMD)) {
+ return NULL;
+ }
+
+ sess = (struct aesni_mb_session *)op->sym->session->_private;
+ } else {
+ void *_sess = NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
+ return NULL;
+
+ sess = (struct aesni_mb_session *)
+ ((struct rte_cryptodev_sym_session *)_sess)->_private;
+
+ if (unlikely(aesni_mb_set_session_parameters(qp->op_fns,
+ sess, op->sym->xform) != 0)) {
+ rte_mempool_put(qp->sess_mp, _sess);
+ sess = NULL;
+ }
+ op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
+ }
+
+ return sess;
+}
+
+/**
+ * Process a crypto operation and complete a JOB_AES_HMAC job structure for
+ * submission to the multi-buffer library for processing.
+ *
+ * @param	job	JOB_AES_HMAC structure to fill
+ * @param	qp	queue pair
+ * @param	op	crypto operation to process
+ *
+ * @return
+ * - 0 on success, with the JOB_AES_HMAC structure completed
+ * - -1 on failure; op->status is updated accordingly
+ */
+static inline int
+set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
+ struct rte_crypto_op *op)
+{
+ struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
+ struct aesni_mb_session *session;
+ uint16_t m_offset = 0;
+
+ session = get_session(qp, op);
+ if (session == NULL) {
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ return -1;
+ }
+ op->status = RTE_CRYPTO_OP_STATUS_ENQUEUED;
+
+ /* Set crypto operation */
+ job->chain_order = session->chain_order;
+
+ /* Set cipher parameters */
+ job->cipher_direction = session->cipher.direction;
+ job->cipher_mode = session->cipher.mode;
+
+ job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes;
+ job->aes_enc_key_expanded = session->cipher.expanded_aes_keys.encode;
+ job->aes_dec_key_expanded = session->cipher.expanded_aes_keys.decode;
+
+
+ /* Set authentication parameters */
+ job->hash_alg = session->auth.algo;
+ if (job->hash_alg == AES_XCBC) {
+ job->_k1_expanded = session->auth.xcbc.k1_expanded;
+ job->_k2 = session->auth.xcbc.k2;
+ job->_k3 = session->auth.xcbc.k3;
+ } else {
+ job->hashed_auth_key_xor_ipad = session->auth.pads.inner;
+ job->hashed_auth_key_xor_opad = session->auth.pads.outer;
+ }
+
+ /* Mutable crypto operation parameters */
+ if (op->sym->m_dst) {
+ m_src = m_dst = op->sym->m_dst;
+
+ /* append space for output data to mbuf */
+ char *odata = rte_pktmbuf_append(m_dst,
+ rte_pktmbuf_data_len(op->sym->m_src));
+ if (odata == NULL) {
+ MB_LOG_ERR("failed to allocate space in destination "
+ "mbuf for source data");
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return -1;
+ }
+
+ memcpy(odata, rte_pktmbuf_mtod(op->sym->m_src, void*),
+ rte_pktmbuf_data_len(op->sym->m_src));
+ } else {
+ m_dst = m_src;
+ m_offset = op->sym->cipher.data.offset;
+ }
+
+ /* Set digest output location */
+ if (job->hash_alg != NULL_HASH &&
+ session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
+ job->auth_tag_output = (uint8_t *)rte_pktmbuf_append(m_dst,
+ get_digest_byte_length(job->hash_alg));
+
+ if (job->auth_tag_output == NULL) {
+ MB_LOG_ERR("failed to allocate space in output mbuf "
+ "for temp digest");
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return -1;
+ }
+
+			memset(job->auth_tag_output, 0,
+				get_digest_byte_length(job->hash_alg));
+
+ } else {
+ job->auth_tag_output = op->sym->auth.digest.data;
+ }
+
+ /*
+	 * The multi-buffer library currently only supports returning a
+	 * truncated digest length, as specified in the relevant IPsec RFCs.
+ */
+ job->auth_tag_output_len_in_bytes =
+ get_truncated_digest_byte_length(job->hash_alg);
+
+ /* Set IV parameters */
+ job->iv = op->sym->cipher.iv.data;
+ job->iv_len_in_bytes = op->sym->cipher.iv.length;
+
+ /* Data Parameter */
+ job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
+ job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);
+
+ job->cipher_start_src_offset_in_bytes = op->sym->cipher.data.offset;
+ job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;
+
+ job->hash_start_src_offset_in_bytes = op->sym->auth.data.offset;
+ job->msg_len_to_hash_in_bytes = op->sym->auth.data.length;
+
+ /* Set user data to be crypto operation data struct */
+ job->user_data = op;
+ job->user_data2 = m_dst;
+
+ return 0;
+}
+
+static inline void
+verify_digest(JOB_AES_HMAC *job, struct rte_crypto_op *op)
+{
+ struct rte_mbuf *m_dst = (struct rte_mbuf *)job->user_data2;
+
+	RTE_ASSERT(m_dst != NULL);
+
+ /* Verify digest if required */
+ if (memcmp(job->auth_tag_output, op->sym->auth.digest.data,
+ job->auth_tag_output_len_in_bytes) != 0)
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+
+ /* trim area used for digest from mbuf */
+ rte_pktmbuf_trim(m_dst, get_digest_byte_length(job->hash_alg));
+}
+
+/**
+ * Process a completed job and return the crypto operation it carried
+ *
+ * @param qp	Queue pair which processed the job
+ * @param job	JOB_AES_HMAC job to post-process
+ *
+ * @return
+ * - Returns the processed crypto operation. For verify operations the
+ *   temporary digest appended to the destination mbuf is checked and trimmed.
+ */
+static inline struct rte_crypto_op *
+post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
+{
+ struct rte_crypto_op *op = (struct rte_crypto_op *)job->user_data;
+
+ struct aesni_mb_session *sess;
+
+	RTE_ASSERT(op != NULL);
+
+ if (unlikely(op->status == RTE_CRYPTO_OP_STATUS_ENQUEUED)) {
+ switch (job->status) {
+ case STS_COMPLETED:
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+ if (job->hash_alg != NULL_HASH) {
+ sess = (struct aesni_mb_session *)
+ op->sym->session->_private;
+
+ if (sess->auth.operation ==
+ RTE_CRYPTO_AUTH_OP_VERIFY)
+ verify_digest(job, op);
+ }
+ break;
+ default:
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ }
+ }
+
+ /* Free session if a session-less crypto op */
+ if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+ rte_mempool_put(qp->sess_mp, op->sym->session);
+ op->sym->session = NULL;
+ }
+
+ return op;
+}
+
+/**
+ * Process a completed JOB_AES_HMAC job and keep processing jobs until
+ * get_completed_job returns NULL
+ *
+ * @param qp		Queue Pair to process
+ * @param job		First completed JOB_AES_HMAC job
+ * @param ops		Array in which to place the processed crypto operations
+ * @param nb_ops	Maximum number of operations that can be placed in ops
+ *
+ * @return
+ * - Number of processed jobs
+ */
+static unsigned
+handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct rte_crypto_op *op = NULL;
+ unsigned processed_jobs = 0;
+
+ while (job != NULL && processed_jobs < nb_ops) {
+ op = post_process_mb_job(qp, job);
+
+ if (op) {
+ ops[processed_jobs++] = op;
+ qp->stats.dequeued_count++;
+ } else {
+ qp->stats.dequeue_err_count++;
+ break;
+ }
+
+ job = (*qp->op_fns->job.get_completed_job)(&qp->mb_mgr);
+ }
+
+ return processed_jobs;
+}
+
+static inline uint16_t
+flush_mb_mgr(struct aesni_mb_qp *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ int processed_ops = 0;
+
+ /* Flush the remaining jobs */
+ JOB_AES_HMAC *job = (*qp->op_fns->job.flush_job)(&qp->mb_mgr);
+
+ if (job)
+ processed_ops += handle_completed_jobs(qp, job,
+ &ops[processed_ops], nb_ops - processed_ops);
+
+ return processed_ops;
+}
+
+static inline JOB_AES_HMAC *
+set_job_null_op(JOB_AES_HMAC *job)
+{
+ job->chain_order = HASH_CIPHER;
+ job->cipher_mode = NULL_CIPHER;
+ job->hash_alg = NULL_HASH;
+ job->cipher_direction = DECRYPT;
+
+ return job;
+}
+
+static uint16_t
+aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct aesni_mb_qp *qp = queue_pair;
+
+ struct rte_crypto_op *op;
+ JOB_AES_HMAC *job;
+
+ int retval, processed_jobs = 0;
+
+ do {
+ /* Get next operation to process from ingress queue */
+ retval = rte_ring_dequeue(qp->ingress_queue, (void **)&op);
+ if (retval < 0)
+ break;
+
+ /* Get next free mb job struct from mb manager */
+ job = (*qp->op_fns->job.get_next)(&qp->mb_mgr);
+ if (unlikely(job == NULL)) {
+ /* if no free mb job structs we need to flush mb_mgr */
+ processed_jobs += flush_mb_mgr(qp,
+ &ops[processed_jobs],
+ (nb_ops - processed_jobs) - 1);
+
+ job = (*qp->op_fns->job.get_next)(&qp->mb_mgr);
+ }
+
+ retval = set_mb_job_params(job, qp, op);
+ if (unlikely(retval != 0)) {
+ qp->stats.dequeue_err_count++;
+ set_job_null_op(job);
+ }
+
+ /* Submit job to multi-buffer for processing */
+ job = (*qp->op_fns->job.submit)(&qp->mb_mgr);
+
+ /*
+ * If submit returns a processed job then handle it,
+ * before submitting subsequent jobs
+ */
+ if (job)
+ processed_jobs += handle_completed_jobs(qp, job,
+ &ops[processed_jobs],
+ nb_ops - processed_jobs);
+
+ } while (processed_jobs < nb_ops);
+
+ if (processed_jobs < 1)
+ processed_jobs += flush_mb_mgr(qp,
+ &ops[processed_jobs],
+ nb_ops - processed_jobs);
+
+ return processed_jobs;
+}
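+
+/*
+ * Minimal usage sketch (assumption, not part of the driver): an application
+ * reaches the burst functions above through the generic cryptodev API. The
+ * dev_id, qp_id and ops arrays below are placeholders.
+ *
+ *	uint16_t sent, recvd;
+ *
+ *	sent = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
+ *	do {
+ *		recvd = rte_cryptodev_dequeue_burst(dev_id, qp_id,
+ *				ops_out, RTE_DIM(ops_out));
+ *	} while (recvd == 0);
+ */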
+
+static int cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev);
+
+static int
+cryptodev_aesni_mb_create(const char *name,
+ struct rte_vdev_device *vdev,
+ struct rte_crypto_vdev_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ struct aesni_mb_private *internals;
+ enum aesni_mb_vector_mode vector_mode;
+
+ if (init_params->name[0] == '\0')
+ snprintf(init_params->name, sizeof(init_params->name),
+ "%s", name);
+
+ /* Check CPU for supported vector instruction set */
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
+ vector_mode = RTE_AESNI_MB_AVX512;
+ else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
+ vector_mode = RTE_AESNI_MB_AVX2;
+ else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
+ vector_mode = RTE_AESNI_MB_AVX;
+ else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
+ vector_mode = RTE_AESNI_MB_SSE;
+ else {
+ MB_LOG_ERR("Vector instructions are not supported by CPU");
+ return -EFAULT;
+ }
+
+ dev = rte_cryptodev_pmd_virtual_dev_init(init_params->name,
+ sizeof(struct aesni_mb_private), init_params->socket_id);
+ if (dev == NULL) {
+ MB_LOG_ERR("failed to create cryptodev vdev");
+ goto init_error;
+ }
+
+ dev->dev_type = RTE_CRYPTODEV_AESNI_MB_PMD;
+ dev->dev_ops = rte_aesni_mb_pmd_ops;
+
+ /* register rx/tx burst functions for data path */
+ dev->dequeue_burst = aesni_mb_pmd_dequeue_burst;
+ dev->enqueue_burst = aesni_mb_pmd_enqueue_burst;
+
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_CPU_AESNI;
+
+ switch (vector_mode) {
+ case RTE_AESNI_MB_SSE:
+ dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
+ break;
+ case RTE_AESNI_MB_AVX:
+ dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
+ break;
+ case RTE_AESNI_MB_AVX2:
+ dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
+ break;
+ case RTE_AESNI_MB_AVX512:
+ dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX512;
+ break;
+ default:
+ break;
+ }
+
+ /* Set vector instructions mode supported */
+ internals = dev->data->dev_private;
+
+ internals->vector_mode = vector_mode;
+ internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
+ internals->max_nb_sessions = init_params->max_nb_sessions;
+
+ return 0;
+init_error:
+	MB_LOG_ERR("driver %s: cryptodev_aesni_mb_create failed",
+ init_params->name);
+
+ cryptodev_aesni_mb_remove(vdev);
+ return -EFAULT;
+}
+
+static int
+cryptodev_aesni_mb_probe(struct rte_vdev_device *vdev)
+{
+ struct rte_crypto_vdev_init_params init_params = {
+ RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
+ RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+ rte_socket_id(),
+ ""
+ };
+ const char *name;
+ const char *input_args;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+ input_args = rte_vdev_device_args(vdev);
+ rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
+
+ RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
+ init_params.socket_id);
+ if (init_params.name[0] != '\0')
+ RTE_LOG(INFO, PMD, " User defined name = %s\n",
+ init_params.name);
+ RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
+ init_params.max_nb_queue_pairs);
+ RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
+ init_params.max_nb_sessions);
+
+ return cryptodev_aesni_mb_create(name, vdev, &init_params);
+}
+
+static int
+cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev)
+{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ RTE_LOG(INFO, PMD, "Closing AESNI crypto device %s on numa socket %u\n",
+ name, rte_socket_id());
+
+ return 0;
+}
+
+static struct rte_vdev_driver cryptodev_aesni_mb_pmd_drv = {
+ .probe = cryptodev_aesni_mb_probe,
+ .remove = cryptodev_aesni_mb_remove
+};
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd_drv);
+RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_MB_PMD,
+ "max_nb_queue_pairs=<int> "
+ "max_nb_sessions=<int> "
+ "socket_id=<int>");
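+
+/*
+ * Usage sketch (assumption, not part of the driver): the PMD registered above
+ * is typically instantiated from the EAL command line, e.g. assuming the vdev
+ * name expands to "crypto_aesni_mb":
+ *
+ *	./app -l 1 -n 4 \
+ *		--vdev "crypto_aesni_mb,max_nb_queue_pairs=2,max_nb_sessions=1024,socket_id=0"
+ */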
diff --git a/src/seastar/dpdk/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/src/seastar/dpdk/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
new file mode 100644
index 00000000..d1bc28e0
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -0,0 +1,529 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "rte_aesni_mb_pmd_private.h"
+
+
+static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
+ { /* MD5 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA1 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA224 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 14,
+ .max = 14,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA256 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA384 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 128,
+ .max = 128,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 24,
+ .max = 24,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA512 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 128,
+ .max = 128,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 32,
+ .max = 32,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* AES XCBC HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* AES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES CTR */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES DOCSIS BPI */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
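+
+/*
+ * Illustrative note (not part of the driver): each {min, max, increment}
+ * triple above describes the supported sizes in bytes. For example, the
+ * AES CBC key_size entry {.min = 16, .max = 32, .increment = 8} advertises
+ * 16, 24 and 32 byte keys (AES-128/192/256), while an increment of 0 means
+ * only the single size given by min/max is valid.
+ */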
+
+
+/** Configure device */
+static int
+aesni_mb_pmd_config(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused struct rte_cryptodev_config *config)
+{
+ return 0;
+}
+
+/** Start device */
+static int
+aesni_mb_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+aesni_mb_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+static int
+aesni_mb_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+
+/** Get device statistics */
+static void
+aesni_mb_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->stats.enqueued_count;
+ stats->dequeued_count += qp->stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+aesni_mb_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->stats, 0, sizeof(qp->stats));
+ }
+}
+
+
+/** Get device info */
+static void
+aesni_mb_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct aesni_mb_private *internals = dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->dev_type = dev->dev_type;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = aesni_mb_pmd_capabilities;
+ dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
+ dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+ }
+}
+
+/** Release queue pair */
+static int
+aesni_mb_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
+ struct rte_ring *r = NULL;
+
+ if (qp != NULL) {
+ r = rte_ring_lookup(qp->name);
+ if (r)
+ rte_ring_free(r);
+ rte_free(qp);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+ return 0;
+}
+
+/** Set a unique name for the queue pair based on the dev_id and qp_id */
+static int
+aesni_mb_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+ struct aesni_mb_qp *qp)
+{
+ unsigned n = snprintf(qp->name, sizeof(qp->name),
+ "aesni_mb_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+	if (n >= sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+/** Create a ring to place processed operations on */
+static struct rte_ring *
+aesni_mb_pmd_qp_create_processed_ops_ring(struct aesni_mb_qp *qp,
+ const char *str, unsigned int ring_size, int socket_id)
+{
+ struct rte_ring *r;
+ char ring_name[RTE_CRYPTODEV_NAME_LEN];
+
+ unsigned int n = snprintf(ring_name, sizeof(ring_name),
+ "%s_%s",
+ qp->name, str);
+
+	if (n >= sizeof(ring_name))
+ return NULL;
+
+ r = rte_ring_lookup(ring_name);
+ if (r) {
+ if (rte_ring_get_size(r) >= ring_size) {
+ MB_LOG_INFO("Reusing existing ring %s for processed ops",
+ ring_name);
+ return r;
+ }
+
+ MB_LOG_ERR("Unable to reuse existing ring %s for processed ops",
+ ring_name);
+ return NULL;
+ }
+
+ return rte_ring_create(ring_name, ring_size, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
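+
+/*
+ * Illustrative example (not part of the driver): for dev_id 0, qp_id 1 and
+ * str "ingress", the helpers above produce the queue pair name
+ * "aesni_mb_pmd_0_qp_1" and the ring name "aesni_mb_pmd_0_qp_1_ingress".
+ */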
+
+/** Setup a queue pair */
+static int
+aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id)
+{
+ struct aesni_mb_qp *qp = NULL;
+ struct aesni_mb_private *internals = dev->data->dev_private;
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ aesni_mb_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("AES-NI PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL)
+ return -ENOMEM;
+
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ if (aesni_mb_pmd_qp_set_unique_name(dev, qp))
+ goto qp_setup_cleanup;
+
+
+ qp->op_fns = &job_ops[internals->vector_mode];
+
+ qp->ingress_queue = aesni_mb_pmd_qp_create_processed_ops_ring(qp,
+ "ingress", qp_conf->nb_descriptors, socket_id);
+ if (qp->ingress_queue == NULL)
+ goto qp_setup_cleanup;
+
+ qp->sess_mp = dev->data->session_pool;
+
+ memset(&qp->stats, 0, sizeof(qp->stats));
+
+ /* Initialise multi-buffer manager */
+ (*qp->op_fns->job.init_mgr)(&qp->mb_mgr);
+ return 0;
+
+qp_setup_cleanup:
+ if (qp)
+ rte_free(qp);
+
+ return -1;
+}
+
+/** Start queue pair */
+static int
+aesni_mb_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ return -ENOTSUP;
+}
+
+/** Stop queue pair */
+static int
+aesni_mb_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ return -ENOTSUP;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+aesni_mb_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the aesni multi-buffer session structure */
+static unsigned
+aesni_mb_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct aesni_mb_session);
+}
+
+/** Configure an AES-NI multi-buffer session from a crypto xform chain */
+static void *
+aesni_mb_pmd_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform, void *sess)
+{
+ struct aesni_mb_private *internals = dev->data->dev_private;
+
+ if (unlikely(sess == NULL)) {
+ MB_LOG_ERR("invalid session struct");
+ return NULL;
+ }
+
+ if (aesni_mb_set_session_parameters(&job_ops[internals->vector_mode],
+ sess, xform) != 0) {
+		MB_LOG_ERR("failed to configure session parameters");
+ return NULL;
+ }
+
+ return sess;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+aesni_mb_pmd_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
+{
+ /*
+	 * Currently we just reset the whole data structure; it needs to be
+	 * investigated whether a more selective reset of the key material
+	 * would be more performant.
+ */
+ if (sess)
+ memset(sess, 0, sizeof(struct aesni_mb_session));
+}
+
+struct rte_cryptodev_ops aesni_mb_pmd_ops = {
+ .dev_configure = aesni_mb_pmd_config,
+ .dev_start = aesni_mb_pmd_start,
+ .dev_stop = aesni_mb_pmd_stop,
+ .dev_close = aesni_mb_pmd_close,
+
+ .stats_get = aesni_mb_pmd_stats_get,
+ .stats_reset = aesni_mb_pmd_stats_reset,
+
+ .dev_infos_get = aesni_mb_pmd_info_get,
+
+ .queue_pair_setup = aesni_mb_pmd_qp_setup,
+ .queue_pair_release = aesni_mb_pmd_qp_release,
+ .queue_pair_start = aesni_mb_pmd_qp_start,
+ .queue_pair_stop = aesni_mb_pmd_qp_stop,
+ .queue_pair_count = aesni_mb_pmd_qp_count,
+
+ .session_get_size = aesni_mb_pmd_session_get_size,
+ .session_configure = aesni_mb_pmd_session_configure,
+ .session_clear = aesni_mb_pmd_session_clear
+};
+
+struct rte_cryptodev_ops *rte_aesni_mb_pmd_ops = &aesni_mb_pmd_ops;
diff --git a/src/seastar/dpdk/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/src/seastar/dpdk/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
new file mode 100644
index 00000000..0d82699c
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
@@ -0,0 +1,240 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_AESNI_MB_PMD_PRIVATE_H_
+#define _RTE_AESNI_MB_PMD_PRIVATE_H_
+
+#include "aesni_mb_ops.h"
+
+#define MB_LOG_ERR(fmt, args...) \
+ RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD), \
+ __func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_AESNI_MB_DEBUG
+#define MB_LOG_INFO(fmt, args...) \
+ RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ CRYPTODEV_NAME_AESNI_MB_PMD, \
+ __func__, __LINE__, ## args)
+
+#define MB_LOG_DBG(fmt, args...) \
+ RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ CRYPTODEV_NAME_AESNI_MB_PMD, \
+ __func__, __LINE__, ## args)
+#else
+#define MB_LOG_INFO(fmt, args...)
+#define MB_LOG_DBG(fmt, args...)
+#endif
+
+#define HMAC_IPAD_VALUE (0x36)
+#define HMAC_OPAD_VALUE (0x5C)
+
+static const unsigned auth_blocksize[] = {
+ [MD5] = 64,
+ [SHA1] = 64,
+ [SHA_224] = 64,
+ [SHA_256] = 64,
+ [SHA_384] = 128,
+ [SHA_512] = 128,
+ [AES_XCBC] = 16,
+};
+
+/**
+ * Get the blocksize in bytes for a specified authentication algorithm
+ *
+ * @note This function will not return a valid value for an invalid
+ * authentication algorithm.
+ */
+static inline unsigned
+get_auth_algo_blocksize(JOB_HASH_ALG algo)
+{
+ return auth_blocksize[algo];
+}
+
+static const unsigned auth_truncated_digest_byte_lengths[] = {
+ [MD5] = 12,
+ [SHA1] = 12,
+ [SHA_224] = 14,
+ [SHA_256] = 16,
+ [SHA_384] = 24,
+ [SHA_512] = 32,
+ [AES_XCBC] = 12,
+ [NULL_HASH] = 0
+};
+
+/**
+ * Get the IPsec specified truncated length in bytes of the HMAC digest for a
+ * specified authentication algorithm
+ *
+ * @note This function will not return a valid value for an invalid
+ * authentication algorithm.
+ */
+static inline unsigned
+get_truncated_digest_byte_length(JOB_HASH_ALG algo)
+{
+ return auth_truncated_digest_byte_lengths[algo];
+}
+
+static const unsigned auth_digest_byte_lengths[] = {
+ [MD5] = 16,
+ [SHA1] = 20,
+ [SHA_224] = 28,
+ [SHA_256] = 32,
+ [SHA_384] = 48,
+ [SHA_512] = 64,
+ [AES_XCBC] = 16,
+ [NULL_HASH] = 0
+};
+
+/**
+ * Get the output digest size in bytes for a specified authentication algorithm
+ *
+ * @note This function will not return a valid value for an invalid
+ * authentication algorithm.
+ */
+static inline unsigned
+get_digest_byte_length(JOB_HASH_ALG algo)
+{
+ return auth_digest_byte_lengths[algo];
+}
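+
+/*
+ * Illustrative example (not part of the driver): for SHA1 HMAC the tables
+ * above yield a full digest of 20 bytes (get_digest_byte_length) but a
+ * truncated digest of 12 bytes (get_truncated_digest_byte_length), matching
+ * the 96-bit ICV used by IPsec (RFC 2404).
+ */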
+
+enum aesni_mb_operation {
+ AESNI_MB_OP_HASH_CIPHER,
+ AESNI_MB_OP_CIPHER_HASH,
+ AESNI_MB_OP_HASH_ONLY,
+ AESNI_MB_OP_CIPHER_ONLY,
+ AESNI_MB_OP_NOT_SUPPORTED
+};
+
+/** private data structure for each virtual AESNI device */
+struct aesni_mb_private {
+ enum aesni_mb_vector_mode vector_mode;
+ /**< CPU vector instruction set mode */
+ unsigned max_nb_queue_pairs;
+ /**< Max number of queue pairs supported by device */
+ unsigned max_nb_sessions;
+ /**< Max number of sessions supported by device */
+};
+
+/** AESNI Multi buffer queue pair */
+struct aesni_mb_qp {
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_CRYPTODEV_NAME_LEN];
+ /**< Unique Queue Pair Name */
+ const struct aesni_mb_op_fns *op_fns;
+ /**< Vector mode dependent pointer table of the multi-buffer APIs */
+ MB_MGR mb_mgr;
+ /**< Multi-buffer instance */
+ struct rte_ring *ingress_queue;
+ /**< Ring for placing operations ready for processing */
+ struct rte_mempool *sess_mp;
+ /**< Session Mempool */
+ struct rte_cryptodev_stats stats;
+ /**< Queue pair statistics */
+} __rte_cache_aligned;
+
+
+/** AES-NI multi-buffer private session structure */
+struct aesni_mb_session {
+ JOB_CHAIN_ORDER chain_order;
+
+ /** Cipher Parameters */
+ struct {
+ /** Cipher direction - encrypt / decrypt */
+ JOB_CIPHER_DIRECTION direction;
+ /** Cipher mode - CBC / Counter */
+ JOB_CIPHER_MODE mode;
+
+ uint64_t key_length_in_bytes;
+
+ struct {
+ uint32_t encode[60] __rte_aligned(16);
+ /**< encode key */
+ uint32_t decode[60] __rte_aligned(16);
+ /**< decode key */
+ } expanded_aes_keys;
+		/**< Expanded AES keys - Allocating space to
+		 * contain the maximum expanded key size which
+		 * is 240 bytes for 256 bit AES, calculated as:
+		 * ((block size in bytes) *
+		 * ((number of rounds) + 1))
+		 */
+ } cipher;
+
+ /** Authentication Parameters */
+ struct {
+ JOB_HASH_ALG algo; /**< Authentication Algorithm */
+ enum rte_crypto_auth_operation operation;
+ /**< auth operation generate or verify */
+ union {
+ struct {
+ uint8_t inner[128] __rte_aligned(16);
+ /**< inner pad */
+ uint8_t outer[128] __rte_aligned(16);
+ /**< outer pad */
+ } pads;
+ /**< HMAC Authentication pads -
+ * allocating space for the maximum pad
+ * size supported which is 128 bytes for
+ * SHA512
+ */
+
+ struct {
+ uint32_t k1_expanded[44] __rte_aligned(16);
+ /**< k1 (expanded key). */
+ uint8_t k2[16] __rte_aligned(16);
+ /**< k2. */
+ uint8_t k3[16] __rte_aligned(16);
+ /**< k3. */
+ } xcbc;
+ /**< Expanded XCBC authentication keys */
+ };
+ } auth;
+} __rte_cache_aligned;
+
+
+/**
+ * Parse the crypto transform chain and set the AES-NI multi-buffer
+ * private session parameters.
+ */
+extern int
+aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
+ struct aesni_mb_session *sess,
+ const struct rte_crypto_sym_xform *xform);
+
+
+/** device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_aesni_mb_pmd_ops;
+
+
+
+#endif /* _RTE_AESNI_MB_PMD_PRIVATE_H_ */
diff --git a/src/seastar/dpdk/drivers/crypto/aesni_mb/rte_pmd_aesni_version.map b/src/seastar/dpdk/drivers/crypto/aesni_mb/rte_pmd_aesni_version.map
new file mode 100644
index 00000000..ad607bbe
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/aesni_mb/rte_pmd_aesni_version.map
@@ -0,0 +1,3 @@
+DPDK_2.2 {
+ local: *;
+};
diff --git a/src/seastar/dpdk/drivers/crypto/armv8/Makefile b/src/seastar/dpdk/drivers/crypto/armv8/Makefile
new file mode 100644
index 00000000..1474951c
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/armv8/Makefile
@@ -0,0 +1,65 @@
+#
+# BSD LICENSE
+#
+# Copyright (C) Cavium networks Ltd. 2017.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Cavium networks nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifneq ($(MAKECMDGOALS),clean)
+ifneq ($(MAKECMDGOALS),config)
+ifeq ($(ARMV8_CRYPTO_LIB_PATH),)
+$(error "Please define ARMV8_CRYPTO_LIB_PATH environment variable")
+endif
+endif
+endif
+
+# library name
+LIB = librte_pmd_armv8.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_armv8_pmd_version.map
+
+# external library dependencies
+CFLAGS += -I$(ARMV8_CRYPTO_LIB_PATH)
+CFLAGS += -I$(ARMV8_CRYPTO_LIB_PATH)/asm/include
+LDLIBS += -L$(ARMV8_CRYPTO_LIB_PATH) -larmv8_crypto
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += rte_armv8_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += rte_armv8_pmd_ops.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/seastar/dpdk/drivers/crypto/armv8/rte_armv8_pmd.c b/src/seastar/dpdk/drivers/crypto/armv8/rte_armv8_pmd.c
new file mode 100644
index 00000000..3d603a5a
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/armv8/rte_armv8_pmd.c
@@ -0,0 +1,906 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdbool.h>
+
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_vdev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+
+#include "armv8_crypto_defs.h"
+
+#include "rte_armv8_pmd_private.h"
+
+static int cryptodev_armv8_crypto_uninit(struct rte_vdev_device *vdev);
+
+/**
+ * Pointers to the supported combined mode crypto functions are stored
+ * in the static tables. Each combined (chained) cryptographic operation
+ * can be described by a set of numbers:
+ * - order: order of operations (cipher, auth) or (auth, cipher)
+ * - direction: encryption or decryption
+ * - calg: cipher algorithm such as AES_CBC, AES_CTR, etc.
+ * - aalg: authentication algorithm such as SHA1, SHA256, etc.
+ * - keyl: cipher key length, for example 128, 192, 256 bits
+ *
+ * In order to quickly acquire each function pointer based on those numbers,
+ * a hierarchy of arrays is maintained. The final level, a 3D array, is
+ * indexed by the combined mode function parameters only (cipher algorithm,
+ * authentication algorithm and key length).
+ *
+ * This gives 3 memory accesses to obtain a function pointer instead of
+ * traversing the arrays manually and comparing function parameters in a loop.
+ *
+ * +--+CRYPTO_FUNC
+ * +--+ENC|
+ * +--+CA|
+ * | +--+DEC
+ * ORDER|
+ * | +--+ENC
+ * +--+AC|
+ * +--+DEC
+ *
+ */
+
+/**
+ * 3D array type for ARM Combined Mode crypto functions pointers.
+ * CRYPTO_CIPHER_MAX: max cipher ID number
+ * CRYPTO_AUTH_MAX: max auth ID number
+ * CRYPTO_CIPHER_KEYLEN_MAX: max key length ID number
+ */
+typedef const crypto_func_t
+crypto_func_tbl_t[CRYPTO_CIPHER_MAX][CRYPTO_AUTH_MAX][CRYPTO_CIPHER_KEYLEN_MAX];
+
+/* Evaluate to key length definition */
+#define KEYL(keyl) (ARMV8_CRYPTO_CIPHER_KEYLEN_ ## keyl)
+
+/* Local aliases for supported ciphers */
+#define CIPH_AES_CBC RTE_CRYPTO_CIPHER_AES_CBC
+/* Local aliases for supported hashes */
+#define AUTH_SHA1_HMAC RTE_CRYPTO_AUTH_SHA1_HMAC
+#define AUTH_SHA256_HMAC RTE_CRYPTO_AUTH_SHA256_HMAC
+
+/**
+ * Arrays containing pointers to particular cryptographic,
+ * combined mode functions.
+ * crypto_op_ca_encrypt: cipher (encrypt), authenticate
+ * crypto_op_ca_decrypt: cipher (decrypt), authenticate
+ * crypto_op_ac_encrypt: authenticate, cipher (encrypt)
+ * crypto_op_ac_decrypt: authenticate, cipher (decrypt)
+ */
+static const crypto_func_tbl_t
+crypto_op_ca_encrypt = {
+ /* [cipher alg][auth alg][key length] = crypto_function, */
+ [CIPH_AES_CBC][AUTH_SHA1_HMAC][KEYL(128)] = aes128cbc_sha1_hmac,
+ [CIPH_AES_CBC][AUTH_SHA256_HMAC][KEYL(128)] = aes128cbc_sha256_hmac,
+};
+
+static const crypto_func_tbl_t
+crypto_op_ca_decrypt = {
+ NULL
+};
+
+static const crypto_func_tbl_t
+crypto_op_ac_encrypt = {
+ NULL
+};
+
+static const crypto_func_tbl_t
+crypto_op_ac_decrypt = {
+ /* [cipher alg][auth alg][key length] = crypto_function, */
+ [CIPH_AES_CBC][AUTH_SHA1_HMAC][KEYL(128)] = sha1_hmac_aes128cbc_dec,
+ [CIPH_AES_CBC][AUTH_SHA256_HMAC][KEYL(128)] = sha256_hmac_aes128cbc_dec,
+};
+
+/**
+ * Arrays containing pointers to particular cryptographic function sets,
+ * covering given cipher operation directions (encrypt, decrypt)
+ * for each order of cipher and authentication pairs.
+ */
+static const crypto_func_tbl_t *
+crypto_cipher_auth[] = {
+ &crypto_op_ca_encrypt,
+ &crypto_op_ca_decrypt,
+ NULL
+};
+
+static const crypto_func_tbl_t *
+crypto_auth_cipher[] = {
+ &crypto_op_ac_encrypt,
+ &crypto_op_ac_decrypt,
+ NULL
+};
+
+/**
+ * Top level array containing pointers to particular cryptographic
+ * function sets, covering given order of chained operations.
+ * crypto_cipher_auth: cipher first, authenticate after
+ * crypto_auth_cipher: authenticate first, cipher after
+ */
+static const crypto_func_tbl_t **
+crypto_chain_order[] = {
+ crypto_cipher_auth,
+ crypto_auth_cipher,
+ NULL
+};
+
+/**
+ * Extract particular combined mode crypto function from the 3D array.
+ */
+#define CRYPTO_GET_ALGO(order, cop, calg, aalg, keyl) \
+({ \
+ crypto_func_tbl_t *func_tbl = \
+ (crypto_chain_order[(order)])[(cop)]; \
+ \
+ ((*func_tbl)[(calg)][(aalg)][KEYL(keyl)]); \
+})
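+
+/*
+ * Illustrative example (not part of the driver): for a cipher-then-auth
+ * chain doing AES-128-CBC encryption with SHA1 HMAC, a lookup such as
+ *
+ *	CRYPTO_GET_ALGO(ARMV8_CRYPTO_CHAIN_CIPHER_AUTH,
+ *			RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+ *			CIPH_AES_CBC, AUTH_SHA1_HMAC, 128)
+ *
+ * resolves through crypto_chain_order -> crypto_cipher_auth ->
+ * crypto_op_ca_encrypt to aes128cbc_sha1_hmac, assuming the chain order and
+ * cipher operation enum values index the tables in declaration order.
+ */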
+
+/*----------------------------------------------------------------------------*/
+
+/**
+ * 2D array type for ARM key schedule functions pointers.
+ * CRYPTO_CIPHER_MAX: max cipher ID number
+ * CRYPTO_CIPHER_KEYLEN_MAX: max key length ID number
+ */
+typedef const crypto_key_sched_t
+crypto_key_sched_tbl_t[CRYPTO_CIPHER_MAX][CRYPTO_CIPHER_KEYLEN_MAX];
+
+static const crypto_key_sched_tbl_t
+crypto_key_sched_encrypt = {
+ /* [cipher alg][key length] = key_expand_func, */
+ [CIPH_AES_CBC][KEYL(128)] = aes128_key_sched_enc,
+};
+
+static const crypto_key_sched_tbl_t
+crypto_key_sched_decrypt = {
+ /* [cipher alg][key length] = key_expand_func, */
+ [CIPH_AES_CBC][KEYL(128)] = aes128_key_sched_dec,
+};
+
+/**
+ * Top level array containing pointers to particular key generation
+ * function sets, covering given operation direction.
+ * crypto_key_sched_encrypt: keys for encryption
+ * crypto_key_sched_decrypt: keys for decryption
+ */
+static const crypto_key_sched_tbl_t *
+crypto_key_sched_dir[] = {
+ &crypto_key_sched_encrypt,
+ &crypto_key_sched_decrypt,
+ NULL
+};
+
+/**
+ * Extract the particular key schedule function from the 2D array.
+ */
+#define CRYPTO_GET_KEY_SCHED(cop, calg, keyl) \
+({ \
+ crypto_key_sched_tbl_t *ks_tbl = crypto_key_sched_dir[(cop)]; \
+ \
+ ((*ks_tbl)[(calg)][KEYL(keyl)]); \
+})
+
+/*----------------------------------------------------------------------------*/
+
+/*
+ *------------------------------------------------------------------------------
+ * Session Prepare
+ *------------------------------------------------------------------------------
+ */
+
+/** Get xform chain order */
+static enum armv8_crypto_chain_order
+armv8_crypto_get_chain_order(const struct rte_crypto_sym_xform *xform)
+{
+
+ /*
+ * This driver currently covers only chained operations.
+	 * Cipher-only and authentication-only operations are ignored,
+	 * as are chains longer than two xform structures.
+ */
+ if (xform->next == NULL || xform->next->next != NULL)
+ return ARMV8_CRYPTO_CHAIN_NOT_SUPPORTED;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return ARMV8_CRYPTO_CHAIN_AUTH_CIPHER;
+ }
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return ARMV8_CRYPTO_CHAIN_CIPHER_AUTH;
+ }
+
+ return ARMV8_CRYPTO_CHAIN_NOT_SUPPORTED;
+}
+
+static inline void
+auth_hmac_pad_prepare(struct armv8_crypto_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ size_t i;
+
+ /* Generate i_key_pad and o_key_pad */
+ memset(sess->auth.hmac.i_key_pad, 0, sizeof(sess->auth.hmac.i_key_pad));
+ rte_memcpy(sess->auth.hmac.i_key_pad, sess->auth.hmac.key,
+ xform->auth.key.length);
+ memset(sess->auth.hmac.o_key_pad, 0, sizeof(sess->auth.hmac.o_key_pad));
+ rte_memcpy(sess->auth.hmac.o_key_pad, sess->auth.hmac.key,
+ xform->auth.key.length);
+ /*
+ * XOR key with IPAD/OPAD values to obtain i_key_pad
+ * and o_key_pad.
+	 * A byte-by-byte operation may seem less efficient here,
+	 * but in fact it is the opposite: the resulting assembly
+	 * code is likely to operate on NEON registers (load the
+	 * auth key to Qx, load IPAD/OPAD into multiple elements
+	 * of Qy, and eor 128 bits at once).
+ */
+ for (i = 0; i < SHA_BLOCK_MAX; i++) {
+ sess->auth.hmac.i_key_pad[i] ^= HMAC_IPAD_VALUE;
+ sess->auth.hmac.o_key_pad[i] ^= HMAC_OPAD_VALUE;
+ }
+}
+
+static inline int
+auth_set_prerequisites(struct armv8_crypto_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ uint8_t partial[64] = { 0 };
+ int error;
+
+ switch (xform->auth.algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ /*
+ * Generate authentication key, i_key_pad and o_key_pad.
+ */
+ /* Zero memory under key */
+ memset(sess->auth.hmac.key, 0, SHA1_AUTH_KEY_LENGTH);
+
+ if (xform->auth.key.length > SHA1_AUTH_KEY_LENGTH) {
+ /*
+ * In case the key is longer than 160 bits
+ * the algorithm will use SHA1(key) instead.
+ */
+ error = sha1_block(NULL, xform->auth.key.data,
+ sess->auth.hmac.key, xform->auth.key.length);
+ if (error != 0)
+ return -1;
+ } else {
+ /*
+			 * Now copy the given authentication key to the session
+			 * key. Since the session key was zeroed above, there is
+			 * no need for additional zero padding if the key is
+			 * shorter than SHA1_AUTH_KEY_LENGTH.
+ */
+ rte_memcpy(sess->auth.hmac.key, xform->auth.key.data,
+ xform->auth.key.length);
+ }
+
+ /* Prepare HMAC padding: key|pattern */
+ auth_hmac_pad_prepare(sess, xform);
+ /*
+ * Calculate partial hash values for i_key_pad and o_key_pad.
+ * Will be used as initialization state for final HMAC.
+ */
+ error = sha1_block_partial(NULL, sess->auth.hmac.i_key_pad,
+ partial, SHA1_BLOCK_SIZE);
+ if (error != 0)
+ return -1;
+ memcpy(sess->auth.hmac.i_key_pad, partial, SHA1_BLOCK_SIZE);
+
+ error = sha1_block_partial(NULL, sess->auth.hmac.o_key_pad,
+ partial, SHA1_BLOCK_SIZE);
+ if (error != 0)
+ return -1;
+ memcpy(sess->auth.hmac.o_key_pad, partial, SHA1_BLOCK_SIZE);
+
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ /*
+ * Generate authentication key, i_key_pad and o_key_pad.
+ */
+ /* Zero memory under key */
+ memset(sess->auth.hmac.key, 0, SHA256_AUTH_KEY_LENGTH);
+
+ if (xform->auth.key.length > SHA256_AUTH_KEY_LENGTH) {
+ /*
+ * In case the key is longer than 256 bits
+ * the algorithm will use SHA256(key) instead.
+ */
+ error = sha256_block(NULL, xform->auth.key.data,
+ sess->auth.hmac.key, xform->auth.key.length);
+ if (error != 0)
+ return -1;
+ } else {
+ /*
+			 * Now copy the given authentication key to the session
+			 * key. Since the session key was zeroed above, there is
+			 * no need for additional zero padding if the key is
+			 * shorter than SHA256_AUTH_KEY_LENGTH.
+ */
+ rte_memcpy(sess->auth.hmac.key, xform->auth.key.data,
+ xform->auth.key.length);
+ }
+
+ /* Prepare HMAC padding: key|pattern */
+ auth_hmac_pad_prepare(sess, xform);
+ /*
+ * Calculate partial hash values for i_key_pad and o_key_pad.
+ * Will be used as initialization state for final HMAC.
+ */
+ error = sha256_block_partial(NULL, sess->auth.hmac.i_key_pad,
+ partial, SHA256_BLOCK_SIZE);
+ if (error != 0)
+ return -1;
+ memcpy(sess->auth.hmac.i_key_pad, partial, SHA256_BLOCK_SIZE);
+
+ error = sha256_block_partial(NULL, sess->auth.hmac.o_key_pad,
+ partial, SHA256_BLOCK_SIZE);
+ if (error != 0)
+ return -1;
+ memcpy(sess->auth.hmac.o_key_pad, partial, SHA256_BLOCK_SIZE);
+
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static inline int
+cipher_set_prerequisites(struct armv8_crypto_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ crypto_key_sched_t cipher_key_sched;
+
+ cipher_key_sched = sess->cipher.key_sched;
+ if (likely(cipher_key_sched != NULL)) {
+ /* Set up cipher session key */
+ cipher_key_sched(sess->cipher.key.data, xform->cipher.key.data);
+ }
+
+ return 0;
+}
+
+static int
+armv8_crypto_set_session_chained_parameters(struct armv8_crypto_session *sess,
+ const struct rte_crypto_sym_xform *cipher_xform,
+ const struct rte_crypto_sym_xform *auth_xform)
+{
+ enum armv8_crypto_chain_order order;
+ enum armv8_crypto_cipher_operation cop;
+ enum rte_crypto_cipher_algorithm calg;
+ enum rte_crypto_auth_algorithm aalg;
+
+ /* Validate and prepare scratch order of combined operations */
+ switch (sess->chain_order) {
+ case ARMV8_CRYPTO_CHAIN_CIPHER_AUTH:
+ case ARMV8_CRYPTO_CHAIN_AUTH_CIPHER:
+ order = sess->chain_order;
+ break;
+ default:
+ return -EINVAL;
+ }
+ /* Select cipher direction */
+ sess->cipher.direction = cipher_xform->cipher.op;
+ /* Select cipher key */
+ sess->cipher.key.length = cipher_xform->cipher.key.length;
+ /* Set cipher direction */
+ cop = sess->cipher.direction;
+ /* Set cipher algorithm */
+ calg = cipher_xform->cipher.algo;
+
+ /* Select cipher algo */
+ switch (calg) {
+ /* Cover supported cipher algorithms */
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ sess->cipher.algo = calg;
+ /* IV len is always 16 bytes (block size) for AES CBC */
+ sess->cipher.iv_len = 16;
+ break;
+ default:
+ return -EINVAL;
+ }
+ /* Select auth generate/verify */
+ sess->auth.operation = auth_xform->auth.op;
+
+ /* Select auth algo */
+ switch (auth_xform->auth.algo) {
+ /* Cover supported hash algorithms */
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ case RTE_CRYPTO_AUTH_SHA256_HMAC: /* Fall through */
+ aalg = auth_xform->auth.algo;
+ sess->auth.mode = ARMV8_CRYPTO_AUTH_AS_HMAC;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Verify supported key lengths and extract proper algorithm */
+ switch (cipher_xform->cipher.key.length << 3) {
+ case 128:
+ sess->crypto_func =
+ CRYPTO_GET_ALGO(order, cop, calg, aalg, 128);
+ sess->cipher.key_sched =
+ CRYPTO_GET_KEY_SCHED(cop, calg, 128);
+ break;
+ case 192:
+ case 256:
+ /* These key lengths are not supported yet */
+ default: /* Fall through */
+ sess->crypto_func = NULL;
+ sess->cipher.key_sched = NULL;
+ return -EINVAL;
+ }
+
+ if (unlikely(sess->crypto_func == NULL)) {
+		/*
+		 * If we get here there must be a bug in the algorithm
+		 * selection above. Keep this check nevertheless, to catch
+		 * the bug immediately and avoid a NULL pointer dereference
+		 * during op processing.
+		 */
+ ARMV8_CRYPTO_LOG_ERR(
+ "No appropriate crypto function for given parameters");
+ return -EINVAL;
+ }
+
+ /* Set up cipher session prerequisites */
+ if (cipher_set_prerequisites(sess, cipher_xform) != 0)
+ return -EINVAL;
+
+ /* Set up authentication session prerequisites */
+ if (auth_set_prerequisites(sess, auth_xform) != 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+/** Parse crypto xform chain and set private session parameters */
+int
+armv8_crypto_set_session_parameters(struct armv8_crypto_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+ bool is_chained_op;
+ int ret;
+
+ /* Filter out spurious/broken requests */
+ if (xform == NULL)
+ return -EINVAL;
+
+ sess->chain_order = armv8_crypto_get_chain_order(xform);
+ switch (sess->chain_order) {
+ case ARMV8_CRYPTO_CHAIN_CIPHER_AUTH:
+ cipher_xform = xform;
+ auth_xform = xform->next;
+ is_chained_op = true;
+ break;
+ case ARMV8_CRYPTO_CHAIN_AUTH_CIPHER:
+ auth_xform = xform;
+ cipher_xform = xform->next;
+ is_chained_op = true;
+ break;
+ default:
+ is_chained_op = false;
+ return -EINVAL;
+ }
+
+ if (is_chained_op) {
+ ret = armv8_crypto_set_session_chained_parameters(sess,
+ cipher_xform, auth_xform);
+ if (unlikely(ret != 0)) {
+ ARMV8_CRYPTO_LOG_ERR(
+ "Invalid/unsupported chained (cipher/auth) parameters");
+ return -EINVAL;
+ }
+ } else {
+ ARMV8_CRYPTO_LOG_ERR("Invalid/unsupported operation");
+ return -EINVAL;
+ }
+
+ return 0;
+}
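As a usage illustration only (not part of this patch), an application hands this function a two-element xform chain. Most field names below are the ones this file already dereferences; the `type` field follows the standard rte_cryptodev symmetric xform layout, and build_chain_example(), aes_key and hmac_key are hypothetical placeholders:

    /* Hypothetical example: build a cipher-then-auth xform chain. */
    static void
    build_chain_example(struct rte_crypto_sym_xform *cipher_xform,
            struct rte_crypto_sym_xform *auth_xform,
            uint8_t *aes_key, uint8_t *hmac_key)
    {
        memset(cipher_xform, 0, sizeof(*cipher_xform));
        memset(auth_xform, 0, sizeof(*auth_xform));

        /* Cipher stage: AES-128-CBC encryption. */
        cipher_xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
        cipher_xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
        cipher_xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
        cipher_xform->cipher.key.data = aes_key;    /* 16-byte key */
        cipher_xform->cipher.key.length = 16;
        cipher_xform->next = auth_xform;

        /* Auth stage: SHA-256 HMAC digest generation. */
        auth_xform->type = RTE_CRYPTO_SYM_XFORM_AUTH;
        auth_xform->auth.algo = RTE_CRYPTO_AUTH_SHA256_HMAC;
        auth_xform->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
        auth_xform->auth.key.data = hmac_key;       /* 16..128 byte key */
        auth_xform->auth.key.length = 32;
        auth_xform->next = NULL;
    }

Passing cipher_xform as the head of the chain would be classified as ARMV8_CRYPTO_CHAIN_CIPHER_AUTH by the armv8_crypto_get_chain_order() call above.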
+
+/** Provide session for operation */
+static inline struct armv8_crypto_session *
+get_session(struct armv8_crypto_qp *qp, struct rte_crypto_op *op)
+{
+ struct armv8_crypto_session *sess = NULL;
+
+ if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
+ /* get existing session */
+ if (likely(op->sym->session != NULL &&
+ op->sym->session->dev_type ==
+ RTE_CRYPTODEV_ARMV8_PMD)) {
+ sess = (struct armv8_crypto_session *)
+ op->sym->session->_private;
+ }
+ } else {
+ /* provide internal session */
+ void *_sess = NULL;
+
+ if (!rte_mempool_get(qp->sess_mp, (void **)&_sess)) {
+ sess = (struct armv8_crypto_session *)
+ ((struct rte_cryptodev_sym_session *)_sess)
+ ->_private;
+
+ if (unlikely(armv8_crypto_set_session_parameters(
+ sess, op->sym->xform) != 0)) {
+ rte_mempool_put(qp->sess_mp, _sess);
+ sess = NULL;
+ } else
+ op->sym->session = _sess;
+ }
+ }
+
+ if (unlikely(sess == NULL))
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+
+ return sess;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Process Operations
+ *------------------------------------------------------------------------------
+ */
+
+/*----------------------------------------------------------------------------*/
+
+/** Process chained (cipher/auth) operation */
+static inline void
+process_armv8_chained_op
+ (struct rte_crypto_op *op, struct armv8_crypto_session *sess,
+ struct rte_mbuf *mbuf_src, struct rte_mbuf *mbuf_dst)
+{
+ crypto_func_t crypto_func;
+ crypto_arg_t arg;
+ struct rte_mbuf *m_asrc, *m_adst;
+ uint8_t *csrc, *cdst;
+ uint8_t *adst, *asrc;
+ uint64_t clen, alen;
+ int error;
+
+ clen = op->sym->cipher.data.length;
+ alen = op->sym->auth.data.length;
+
+ csrc = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
+ op->sym->cipher.data.offset);
+ cdst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ op->sym->cipher.data.offset);
+
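+	/*
+	 * For cipher-then-auth the digest is computed over the ciphered
+	 * destination buffer; for auth-then-cipher it is computed over
+	 * the source buffer.
+	 */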
+ switch (sess->chain_order) {
+ case ARMV8_CRYPTO_CHAIN_CIPHER_AUTH:
+ m_asrc = m_adst = mbuf_dst;
+ break;
+ case ARMV8_CRYPTO_CHAIN_AUTH_CIPHER:
+ m_asrc = mbuf_src;
+ m_adst = mbuf_dst;
+ break;
+ default:
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return;
+ }
+ asrc = rte_pktmbuf_mtod_offset(m_asrc, uint8_t *,
+ op->sym->auth.data.offset);
+
+ switch (sess->auth.mode) {
+ case ARMV8_CRYPTO_AUTH_AS_AUTH:
+ /* Nothing to do here, just verify correct option */
+ break;
+ case ARMV8_CRYPTO_AUTH_AS_HMAC:
+ arg.digest.hmac.key = sess->auth.hmac.key;
+ arg.digest.hmac.i_key_pad = sess->auth.hmac.i_key_pad;
+ arg.digest.hmac.o_key_pad = sess->auth.hmac.o_key_pad;
+ break;
+ default:
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return;
+ }
+
+ if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_GENERATE) {
+ adst = op->sym->auth.digest.data;
+ if (adst == NULL) {
+ adst = rte_pktmbuf_mtod_offset(m_adst,
+ uint8_t *,
+ op->sym->auth.data.offset +
+ op->sym->auth.data.length);
+ }
+ } else {
+ adst = (uint8_t *)rte_pktmbuf_append(m_asrc,
+ op->sym->auth.digest.length);
+ }
+
+ if (unlikely(op->sym->cipher.iv.length != sess->cipher.iv_len)) {
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return;
+ }
+
+ arg.cipher.iv = op->sym->cipher.iv.data;
+ arg.cipher.key = sess->cipher.key.data;
+ /* Acquire combined mode function */
+ crypto_func = sess->crypto_func;
+ ARMV8_CRYPTO_ASSERT(crypto_func != NULL);
+ error = crypto_func(csrc, cdst, clen, asrc, adst, alen, &arg);
+ if (error != 0) {
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return;
+ }
+
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
+ if (memcmp(adst, op->sym->auth.digest.data,
+ op->sym->auth.digest.length) != 0) {
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ }
+ /* Trim area used for digest from mbuf. */
+ rte_pktmbuf_trim(m_asrc,
+ op->sym->auth.digest.length);
+ }
+}
+
+/** Process crypto operation for mbuf */
+static inline int
+process_op(const struct armv8_crypto_qp *qp, struct rte_crypto_op *op,
+ struct armv8_crypto_session *sess)
+{
+ struct rte_mbuf *msrc, *mdst;
+
+ msrc = op->sym->m_src;
+ mdst = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+ switch (sess->chain_order) {
+ case ARMV8_CRYPTO_CHAIN_CIPHER_AUTH:
+ case ARMV8_CRYPTO_CHAIN_AUTH_CIPHER: /* Fall through */
+ process_armv8_chained_op(op, sess, msrc, mdst);
+ break;
+ default:
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ break;
+ }
+
+ /* Free session if a session-less crypto op */
+ if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+ memset(sess, 0, sizeof(struct armv8_crypto_session));
+ rte_mempool_put(qp->sess_mp, op->sym->session);
+ op->sym->session = NULL;
+ }
+
+ if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+ if (unlikely(op->status == RTE_CRYPTO_OP_STATUS_ERROR))
+ return -1;
+
+ return 0;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * PMD Framework
+ *------------------------------------------------------------------------------
+ */
+
+/** Enqueue burst */
+static uint16_t
+armv8_crypto_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct armv8_crypto_session *sess;
+ struct armv8_crypto_qp *qp = queue_pair;
+ int i, retval;
+
+ for (i = 0; i < nb_ops; i++) {
+ sess = get_session(qp, ops[i]);
+ if (unlikely(sess == NULL))
+ goto enqueue_err;
+
+ retval = process_op(qp, ops[i], sess);
+ if (unlikely(retval < 0))
+ goto enqueue_err;
+ }
+
+ retval = rte_ring_enqueue_burst(qp->processed_ops, (void *)ops, i,
+ NULL);
+ qp->stats.enqueued_count += retval;
+
+ return retval;
+
+enqueue_err:
+ retval = rte_ring_enqueue_burst(qp->processed_ops, (void *)ops, i,
+ NULL);
+ if (ops[i] != NULL)
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+
+ qp->stats.enqueue_err_count++;
+ return retval;
+}
+
+/** Dequeue burst */
+static uint16_t
+armv8_crypto_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct armv8_crypto_qp *qp = queue_pair;
+
+ unsigned int nb_dequeued = 0;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
+ (void **)ops, nb_ops, NULL);
+ qp->stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
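A hedged sketch of how an application drives these handlers through the generic cryptodev burst API; dev_id, qp_id, ops, deq_ops and nb_ops are placeholders for an already configured device, queue pair and populated operation arrays:

    /* Hypothetical polling loop; not part of this patch. */
    uint16_t enq, deq = 0;

    enq = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
    while (deq < enq)
        deq += rte_cryptodev_dequeue_burst(dev_id, qp_id,
                &deq_ops[deq], enq - deq);

Because this PMD performs the crypto work synchronously inside armv8_crypto_pmd_enqueue_burst() and only stages completed operations on the processed_ops ring, the dequeue loop normally completes on its first pass.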
+
+/** Create ARMv8 crypto device */
+static int
+cryptodev_armv8_crypto_create(const char *name,
+ struct rte_vdev_device *vdev,
+ struct rte_crypto_vdev_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ struct armv8_crypto_private *internals;
+
+	/* Check CPU support for the AES instruction set */
+ if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
+ ARMV8_CRYPTO_LOG_ERR(
+ "AES instructions not supported by CPU");
+ return -EFAULT;
+ }
+
+	/* Check CPU support for the SHA instruction set */
+ if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_SHA1) ||
+ !rte_cpu_get_flag_enabled(RTE_CPUFLAG_SHA2)) {
+ ARMV8_CRYPTO_LOG_ERR(
+ "SHA1/SHA2 instructions not supported by CPU");
+ return -EFAULT;
+ }
+
+	/* Check CPU support for the Advanced SIMD instruction set */
+ if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {
+ ARMV8_CRYPTO_LOG_ERR(
+ "Advanced SIMD instructions not supported by CPU");
+ return -EFAULT;
+ }
+
+ if (init_params->name[0] == '\0')
+ snprintf(init_params->name, sizeof(init_params->name),
+ "%s", name);
+
+ dev = rte_cryptodev_pmd_virtual_dev_init(init_params->name,
+ sizeof(struct armv8_crypto_private),
+ init_params->socket_id);
+ if (dev == NULL) {
+ ARMV8_CRYPTO_LOG_ERR("failed to create cryptodev vdev");
+ goto init_error;
+ }
+
+ dev->dev_type = RTE_CRYPTODEV_ARMV8_PMD;
+ dev->dev_ops = rte_armv8_crypto_pmd_ops;
+
+ /* register rx/tx burst functions for data path */
+ dev->dequeue_burst = armv8_crypto_pmd_dequeue_burst;
+ dev->enqueue_burst = armv8_crypto_pmd_enqueue_burst;
+
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_CPU_NEON |
+ RTE_CRYPTODEV_FF_CPU_ARM_CE;
+
+	/* Set up the device private data */
+ internals = dev->data->dev_private;
+
+ internals->max_nb_qpairs = init_params->max_nb_queue_pairs;
+ internals->max_nb_sessions = init_params->max_nb_sessions;
+
+ return 0;
+
+init_error:
+ ARMV8_CRYPTO_LOG_ERR(
+ "driver %s: cryptodev_armv8_crypto_create failed",
+ init_params->name);
+
+ cryptodev_armv8_crypto_uninit(vdev);
+ return -EFAULT;
+}
+
+/** Initialise ARMv8 crypto device */
+static int
+cryptodev_armv8_crypto_init(struct rte_vdev_device *vdev)
+{
+ struct rte_crypto_vdev_init_params init_params = {
+ RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
+ RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+ rte_socket_id(),
+ {0}
+ };
+ const char *name;
+ const char *input_args;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+ input_args = rte_vdev_device_args(vdev);
+ rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
+
+ RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
+ init_params.socket_id);
+ if (init_params.name[0] != '\0') {
+ RTE_LOG(INFO, PMD, " User defined name = %s\n",
+ init_params.name);
+ }
+ RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
+ init_params.max_nb_queue_pairs);
+ RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
+ init_params.max_nb_sessions);
+
+ return cryptodev_armv8_crypto_create(name, vdev, &init_params);
+}
+
+/** Uninitialise ARMv8 crypto device */
+static int
+cryptodev_armv8_crypto_uninit(struct rte_vdev_device *vdev)
+{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ RTE_LOG(INFO, PMD,
+ "Closing ARMv8 crypto device %s on numa socket %u\n",
+ name, rte_socket_id());
+
+ return 0;
+}
+
+static struct rte_vdev_driver armv8_crypto_drv = {
+ .probe = cryptodev_armv8_crypto_init,
+ .remove = cryptodev_armv8_crypto_uninit
+};
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_ARMV8_PMD, armv8_crypto_drv);
+RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_ARMV8_PMD, cryptodev_armv8_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_ARMV8_PMD,
+ "max_nb_queue_pairs=<int> "
+ "max_nb_sessions=<int> "
+ "socket_id=<int>");
diff --git a/src/seastar/dpdk/drivers/crypto/armv8/rte_armv8_pmd_ops.c b/src/seastar/dpdk/drivers/crypto/armv8/rte_armv8_pmd_ops.c
new file mode 100644
index 00000000..4d9ccbfb
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/armv8/rte_armv8_pmd_ops.c
@@ -0,0 +1,370 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "armv8_crypto_defs.h"
+
+#include "rte_armv8_pmd_private.h"
+
+static const struct rte_cryptodev_capabilities
+ armv8_crypto_pmd_capabilities[] = {
+ { /* SHA1 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 16,
+ .max = 128,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 20,
+ .max = 20,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA256 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 16,
+ .max = 128,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 32,
+ .max = 32,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* AES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+
+/** Configure device */
+static int
+armv8_crypto_pmd_config(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused struct rte_cryptodev_config *config)
+{
+ return 0;
+}
+
+/** Start device */
+static int
+armv8_crypto_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+armv8_crypto_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+static int
+armv8_crypto_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+
+/** Get device statistics */
+static void
+armv8_crypto_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct armv8_crypto_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->stats.enqueued_count;
+ stats->dequeued_count += qp->stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+armv8_crypto_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct armv8_crypto_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->stats, 0, sizeof(qp->stats));
+ }
+}
+
+
+/** Get device info */
+static void
+armv8_crypto_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct armv8_crypto_private *internals = dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->dev_type = dev->dev_type;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = armv8_crypto_pmd_capabilities;
+ dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
+ dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+ }
+}
+
+/** Release queue pair */
+static int
+armv8_crypto_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+
+ if (dev->data->queue_pairs[qp_id] != NULL) {
+ rte_free(dev->data->queue_pairs[qp_id]);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+
+ return 0;
+}
+
+/** Set a unique name for the queue pair based on its dev_id and qp_id */
+static int
+armv8_crypto_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+ struct armv8_crypto_qp *qp)
+{
+ unsigned int n;
+
+ n = snprintf(qp->name, sizeof(qp->name), "armv8_crypto_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+	if (n >= sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+
+/** Create a ring to place processed operations on */
+static struct rte_ring *
+armv8_crypto_pmd_qp_create_processed_ops_ring(struct armv8_crypto_qp *qp,
+ unsigned int ring_size, int socket_id)
+{
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(qp->name);
+ if (r) {
+ if (rte_ring_get_size(r) >= ring_size) {
+ ARMV8_CRYPTO_LOG_INFO(
+ "Reusing existing ring %s for processed ops",
+ qp->name);
+ return r;
+ }
+
+ ARMV8_CRYPTO_LOG_ERR(
+ "Unable to reuse existing ring %s for processed ops",
+ qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+
+/** Setup a queue pair */
+static int
+armv8_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id)
+{
+ struct armv8_crypto_qp *qp = NULL;
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ armv8_crypto_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("ARMv8 PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL)
+ return -ENOMEM;
+
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ if (armv8_crypto_pmd_qp_set_unique_name(dev, qp) != 0)
+ goto qp_setup_cleanup;
+
+ qp->processed_ops = armv8_crypto_pmd_qp_create_processed_ops_ring(qp,
+ qp_conf->nb_descriptors, socket_id);
+ if (qp->processed_ops == NULL)
+ goto qp_setup_cleanup;
+
+ qp->sess_mp = dev->data->session_pool;
+
+ memset(&qp->stats, 0, sizeof(qp->stats));
+
+ return 0;
+
+qp_setup_cleanup:
+ if (qp)
+ rte_free(qp);
+
+ return -1;
+}
+
+/** Start queue pair */
+static int
+armv8_crypto_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ return -ENOTSUP;
+}
+
+/** Stop queue pair */
+static int
+armv8_crypto_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ return -ENOTSUP;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+armv8_crypto_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the session structure */
+static unsigned
+armv8_crypto_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct armv8_crypto_session);
+}
+
+/** Configure the session from a crypto xform chain */
+static void *
+armv8_crypto_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_sym_xform *xform, void *sess)
+{
+ if (unlikely(sess == NULL)) {
+ ARMV8_CRYPTO_LOG_ERR("invalid session struct");
+ return NULL;
+ }
+
+ if (armv8_crypto_set_session_parameters(
+ sess, xform) != 0) {
+		ARMV8_CRYPTO_LOG_ERR("failed to configure session parameters");
+ return NULL;
+ }
+
+ return sess;
+}
+
+/** Clear the session memory so it doesn't leave key material behind */
+static void
+armv8_crypto_pmd_session_clear(struct rte_cryptodev *dev __rte_unused,
+ void *sess)
+{
+
+ /* Zero out the whole structure */
+ if (sess)
+ memset(sess, 0, sizeof(struct armv8_crypto_session));
+}
+
+struct rte_cryptodev_ops armv8_crypto_pmd_ops = {
+ .dev_configure = armv8_crypto_pmd_config,
+ .dev_start = armv8_crypto_pmd_start,
+ .dev_stop = armv8_crypto_pmd_stop,
+ .dev_close = armv8_crypto_pmd_close,
+
+ .stats_get = armv8_crypto_pmd_stats_get,
+ .stats_reset = armv8_crypto_pmd_stats_reset,
+
+ .dev_infos_get = armv8_crypto_pmd_info_get,
+
+ .queue_pair_setup = armv8_crypto_pmd_qp_setup,
+ .queue_pair_release = armv8_crypto_pmd_qp_release,
+ .queue_pair_start = armv8_crypto_pmd_qp_start,
+ .queue_pair_stop = armv8_crypto_pmd_qp_stop,
+ .queue_pair_count = armv8_crypto_pmd_qp_count,
+
+ .session_get_size = armv8_crypto_pmd_session_get_size,
+ .session_configure = armv8_crypto_pmd_session_configure,
+ .session_clear = armv8_crypto_pmd_session_clear
+};
+
+struct rte_cryptodev_ops *rte_armv8_crypto_pmd_ops = &armv8_crypto_pmd_ops;
diff --git a/src/seastar/dpdk/drivers/crypto/armv8/rte_armv8_pmd_private.h b/src/seastar/dpdk/drivers/crypto/armv8/rte_armv8_pmd_private.h
new file mode 100644
index 00000000..b75107f2
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/armv8/rte_armv8_pmd_private.h
@@ -0,0 +1,211 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_ARMV8_PMD_PRIVATE_H_
+#define _RTE_ARMV8_PMD_PRIVATE_H_
+
+#define ARMV8_CRYPTO_LOG_ERR(fmt, args...) \
+ RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_ARMV8_CRYPTO_PMD), \
+ __func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_ARMV8_CRYPTO_DEBUG
+#define ARMV8_CRYPTO_LOG_INFO(fmt, args...) \
+ RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_ARMV8_CRYPTO_PMD), \
+ __func__, __LINE__, ## args)
+
+#define ARMV8_CRYPTO_LOG_DBG(fmt, args...) \
+ RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_ARMV8_CRYPTO_PMD), \
+ __func__, __LINE__, ## args)
+
+#define ARMV8_CRYPTO_ASSERT(con) \
+do { \
+ if (!(con)) { \
+		rte_panic("%s(): " #con \
+		    " condition failed, line %u", __func__, __LINE__); \
+ } \
+} while (0)
+
+#else
+#define ARMV8_CRYPTO_LOG_INFO(fmt, args...)
+#define ARMV8_CRYPTO_LOG_DBG(fmt, args...)
+#define ARMV8_CRYPTO_ASSERT(con)
+#endif
+
+#define NBBY 8 /* Number of bits in a byte */
+#define BYTE_LENGTH(x) ((x) / NBBY) /* Number of bytes in x (round down) */
+
+/** ARMv8 operation order mode enumerator */
+enum armv8_crypto_chain_order {
+ ARMV8_CRYPTO_CHAIN_CIPHER_AUTH,
+ ARMV8_CRYPTO_CHAIN_AUTH_CIPHER,
+ ARMV8_CRYPTO_CHAIN_NOT_SUPPORTED,
+ ARMV8_CRYPTO_CHAIN_LIST_END = ARMV8_CRYPTO_CHAIN_NOT_SUPPORTED
+};
+
+/** ARMv8 cipher operation enumerator */
+enum armv8_crypto_cipher_operation {
+ ARMV8_CRYPTO_CIPHER_OP_ENCRYPT = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+ ARMV8_CRYPTO_CIPHER_OP_DECRYPT = RTE_CRYPTO_CIPHER_OP_DECRYPT,
+ ARMV8_CRYPTO_CIPHER_OP_NOT_SUPPORTED,
+ ARMV8_CRYPTO_CIPHER_OP_LIST_END = ARMV8_CRYPTO_CIPHER_OP_NOT_SUPPORTED
+};
+
+enum armv8_crypto_cipher_keylen {
+ ARMV8_CRYPTO_CIPHER_KEYLEN_128,
+ ARMV8_CRYPTO_CIPHER_KEYLEN_192,
+ ARMV8_CRYPTO_CIPHER_KEYLEN_256,
+ ARMV8_CRYPTO_CIPHER_KEYLEN_NOT_SUPPORTED,
+ ARMV8_CRYPTO_CIPHER_KEYLEN_LIST_END =
+ ARMV8_CRYPTO_CIPHER_KEYLEN_NOT_SUPPORTED
+};
+
+/** ARMv8 auth mode enumerator */
+enum armv8_crypto_auth_mode {
+ ARMV8_CRYPTO_AUTH_AS_AUTH,
+ ARMV8_CRYPTO_AUTH_AS_HMAC,
+ ARMV8_CRYPTO_AUTH_AS_CIPHER,
+ ARMV8_CRYPTO_AUTH_NOT_SUPPORTED,
+ ARMV8_CRYPTO_AUTH_LIST_END = ARMV8_CRYPTO_AUTH_NOT_SUPPORTED
+};
+
+#define CRYPTO_ORDER_MAX ARMV8_CRYPTO_CHAIN_LIST_END
+#define CRYPTO_CIPHER_OP_MAX ARMV8_CRYPTO_CIPHER_OP_LIST_END
+#define CRYPTO_CIPHER_KEYLEN_MAX ARMV8_CRYPTO_CIPHER_KEYLEN_LIST_END
+#define CRYPTO_CIPHER_MAX RTE_CRYPTO_CIPHER_LIST_END
+#define CRYPTO_AUTH_MAX RTE_CRYPTO_AUTH_LIST_END
+
+#define HMAC_IPAD_VALUE (0x36)
+#define HMAC_OPAD_VALUE (0x5C)
+
+#define SHA256_AUTH_KEY_LENGTH (BYTE_LENGTH(256))
+#define SHA256_BLOCK_SIZE (BYTE_LENGTH(512))
+
+#define SHA1_AUTH_KEY_LENGTH (BYTE_LENGTH(160))
+#define SHA1_BLOCK_SIZE (BYTE_LENGTH(512))
+
+#define SHA_AUTH_KEY_MAX SHA256_AUTH_KEY_LENGTH
+#define SHA_BLOCK_MAX SHA256_BLOCK_SIZE
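+/* With NBBY == 8 these evaluate to a 32-byte SHA-256 key, a 20-byte SHA-1 key
+ * and a 64-byte block size for both algorithms.
+ */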
+
+typedef int (*crypto_func_t)(uint8_t *, uint8_t *, uint64_t,
+ uint8_t *, uint8_t *, uint64_t,
+ crypto_arg_t *);
+
+typedef void (*crypto_key_sched_t)(uint8_t *, const uint8_t *);
+
+/** private data structure for each ARMv8 crypto device */
+struct armv8_crypto_private {
+ unsigned int max_nb_qpairs;
+ /**< Max number of queue pairs */
+ unsigned int max_nb_sessions;
+ /**< Max number of sessions */
+};
+
+/** ARMv8 crypto queue pair */
+struct armv8_crypto_qp {
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ struct rte_ring *processed_ops;
+	/**< Ring for placing processed packets */
+ struct rte_mempool *sess_mp;
+ /**< Session Mempool */
+ struct rte_cryptodev_stats stats;
+ /**< Queue pair statistics */
+ char name[RTE_CRYPTODEV_NAME_LEN];
+ /**< Unique Queue Pair Name */
+} __rte_cache_aligned;
+
+/** ARMv8 crypto private session structure */
+struct armv8_crypto_session {
+ enum armv8_crypto_chain_order chain_order;
+ /**< chain order mode */
+ crypto_func_t crypto_func;
+ /**< cryptographic function to use for this session */
+
+ /** Cipher Parameters */
+ struct {
+ enum rte_crypto_cipher_operation direction;
+ /**< cipher operation direction */
+ enum rte_crypto_cipher_algorithm algo;
+ /**< cipher algorithm */
+ int iv_len;
+ /**< IV length */
+
+ struct {
+ uint8_t data[256];
+ /**< key data */
+ size_t length;
+ /**< key length in bytes */
+ } key;
+
+ crypto_key_sched_t key_sched;
+ /**< Key schedule function */
+ } cipher;
+
+ /** Authentication Parameters */
+ struct {
+ enum rte_crypto_auth_operation operation;
+ /**< auth operation generate or verify */
+ enum armv8_crypto_auth_mode mode;
+ /**< auth operation mode */
+
+ union {
+ struct {
+ /* Add data if needed */
+ } auth;
+
+ struct {
+ uint8_t i_key_pad[SHA_BLOCK_MAX]
+ __rte_cache_aligned;
+ /**< inner pad (max supported block length) */
+ uint8_t o_key_pad[SHA_BLOCK_MAX]
+ __rte_cache_aligned;
+ /**< outer pad (max supported block length) */
+ uint8_t key[SHA_AUTH_KEY_MAX];
+			/**< HMAC key (max supported length) */
+ } hmac;
+ };
+ } auth;
+
+} __rte_cache_aligned;
+
+/** Set and validate ARMv8 crypto session parameters */
+extern int armv8_crypto_set_session_parameters(
+ struct armv8_crypto_session *sess,
+ const struct rte_crypto_sym_xform *xform);
+
+/** device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_armv8_crypto_pmd_ops;
+
+#endif /* _RTE_ARMV8_PMD_PRIVATE_H_ */
diff --git a/src/seastar/dpdk/drivers/crypto/armv8/rte_armv8_pmd_version.map b/src/seastar/dpdk/drivers/crypto/armv8/rte_armv8_pmd_version.map
new file mode 100644
index 00000000..1f84b68a
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/armv8/rte_armv8_pmd_version.map
@@ -0,0 +1,3 @@
+DPDK_17.02 {
+ local: *;
+};
diff --git a/src/seastar/dpdk/drivers/crypto/dpaa2_sec/Makefile b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/Makefile
new file mode 100644
index 00000000..11c7c785
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/Makefile
@@ -0,0 +1,78 @@
+# BSD LICENSE
+#
+# Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+# Copyright (c) 2016 NXP. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Freescale Semiconductor, Inc nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_dpaa2_sec.a
+
+# build flags
+ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_SEC_DEBUG_INIT),y)
+CFLAGS += -O0 -g
+CFLAGS += "-Wno-error"
+else
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+endif
+CFLAGS += -D _GNU_SOURCE
+
+CFLAGS += -I$(RTE_SDK)/drivers/crypto/dpaa2_sec/
+CFLAGS += -I$(RTE_SDK)/drivers/crypto/dpaa2_sec/mc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/mc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/portal
+CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa2/
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
+
+# versioning export map
+EXPORT_MAP := rte_pmd_dpaa2_sec_version.map
+
+# library version
+LIBABIVER := 1
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += dpaa2_sec_dpseci.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += mc/dpseci.c
+
+# library dependencies
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += lib/librte_eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += lib/librte_mempool lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += lib/librte_cryptodev
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += drivers/bus/fslmc
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += drivers/mempool/dpaa2
+
+LDLIBS += -lrte_bus_fslmc
+LDLIBS += -lrte_mempool_dpaa2
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/seastar/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
new file mode 100644
index 00000000..4e01fe81
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -0,0 +1,1656 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (c) 2016 NXP. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Freescale Semiconductor, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <time.h>
+#include <net/if.h>
+
+#include <rte_mbuf.h>
+#include <rte_cryptodev.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+#include <rte_dev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_common.h>
+#include <rte_fslmc.h>
+#include <fslmc_vfio.h>
+#include <dpaa2_hw_pvt.h>
+#include <dpaa2_hw_dpio.h>
+#include <dpaa2_hw_mempool.h>
+#include <fsl_dpseci.h>
+#include <fsl_mc_sys.h>
+
+#include "dpaa2_sec_priv.h"
+#include "dpaa2_sec_logs.h"
+
+/* RTA header files */
+#include <hw/desc/ipsec.h>
+#include <hw/desc/algo.h>
+
+/* Minimum job descriptor consists of a one-word job descriptor HEADER and
+ * a pointer to the shared descriptor.
+ */
+#define MIN_JOB_DESC_SIZE (CAAM_CMD_SZ + CAAM_PTR_SZ)
+#define FSL_VENDOR_ID 0x1957
+#define FSL_DEVICE_ID 0x410
+#define FSL_SUBSYSTEM_SEC 1
+#define FSL_MC_DPSECI_DEVID 3
+
+#define NO_PREFETCH 0
+#define TDES_CBC_IV_LEN 8
+#define AES_CBC_IV_LEN 16
+enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;
+
+static inline int
+build_authenc_fd(dpaa2_sec_session *sess,
+ struct rte_crypto_op *op,
+ struct qbman_fd *fd, uint16_t bpid)
+{
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct ctxt_priv *priv = sess->ctxt;
+ struct qbman_fle *fle, *sge;
+ struct sec_flow_context *flc;
+ uint32_t auth_only_len = sym_op->auth.data.length -
+ sym_op->cipher.data.length;
+ int icv_len = sym_op->auth.digest.length;
+ uint8_t *old_icv;
+ uint32_t mem_len = (7 * sizeof(struct qbman_fle)) + icv_len;
+
+ PMD_INIT_FUNC_TRACE();
+
+	/* We are using the first FLE entry to store the mbuf.
+	 * Currently we do not know which FLE has the mbuf stored,
+	 * so while retrieving we go back one FLE from the FD address
+	 * to get the mbuf address from the previous FLE.
+	 * A better approach would be to use the inline mbuf.
+	 */
+ fle = rte_zmalloc(NULL, mem_len, RTE_CACHE_LINE_SIZE);
+ if (!fle) {
+ RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
+ return -1;
+ }
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
+ fle = fle + 1;
+ sge = fle + 2;
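+	/*
+	 * Layout of the allocation: entry 0 holds the op back-pointer set
+	 * above, entry 1 is the output frame list entry, entry 2 the input
+	 * frame list entry; the remaining four entries serve as
+	 * scatter-gather entries and the trailing icv_len bytes as ICV
+	 * scratch space.
+	 */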
+ if (likely(bpid < MAX_BPID)) {
+ DPAA2_SET_FD_BPID(fd, bpid);
+ DPAA2_SET_FLE_BPID(fle, bpid);
+ DPAA2_SET_FLE_BPID(fle + 1, bpid);
+ DPAA2_SET_FLE_BPID(sge, bpid);
+ DPAA2_SET_FLE_BPID(sge + 1, bpid);
+ DPAA2_SET_FLE_BPID(sge + 2, bpid);
+ DPAA2_SET_FLE_BPID(sge + 3, bpid);
+ } else {
+ DPAA2_SET_FD_IVP(fd);
+ DPAA2_SET_FLE_IVP(fle);
+ DPAA2_SET_FLE_IVP((fle + 1));
+ DPAA2_SET_FLE_IVP(sge);
+ DPAA2_SET_FLE_IVP((sge + 1));
+ DPAA2_SET_FLE_IVP((sge + 2));
+ DPAA2_SET_FLE_IVP((sge + 3));
+ }
+
+ /* Save the shared descriptor */
+ flc = &priv->flc_desc[0].flc;
+ /* Configure FD as a FRAME LIST */
+ DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
+ DPAA2_SET_FD_COMPOUND_FMT(fd);
+ DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+ PMD_TX_LOG(DEBUG, "auth_off: 0x%x/length %d, digest-len=%d\n"
+ "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
+ sym_op->auth.data.offset,
+ sym_op->auth.data.length,
+ sym_op->auth.digest.length,
+ sym_op->cipher.data.offset,
+ sym_op->cipher.data.length,
+ sym_op->cipher.iv.length,
+ sym_op->m_src->data_off);
+
+ /* Configure Output FLE with Scatter/Gather Entry */
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
+ if (auth_only_len)
+ DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
+ fle->length = (sess->dir == DIR_ENC) ?
+ (sym_op->cipher.data.length + icv_len) :
+ sym_op->cipher.data.length;
+
+ DPAA2_SET_FLE_SG_EXT(fle);
+
+ /* Configure Output SGE for Encap/Decap */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+ DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
+ sym_op->m_src->data_off);
+ sge->length = sym_op->cipher.data.length;
+
+ if (sess->dir == DIR_ENC) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge,
+ DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
+ sge->length = sym_op->auth.digest.length;
+ DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
+ sym_op->cipher.iv.length));
+ }
+ DPAA2_SET_FLE_FIN(sge);
+
+ sge++;
+ fle++;
+
+ /* Configure Input FLE with Scatter/Gather Entry */
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
+ DPAA2_SET_FLE_SG_EXT(fle);
+ DPAA2_SET_FLE_FIN(fle);
+ fle->length = (sess->dir == DIR_ENC) ?
+ (sym_op->auth.data.length + sym_op->cipher.iv.length) :
+ (sym_op->auth.data.length + sym_op->cipher.iv.length +
+ sym_op->auth.digest.length);
+
+ /* Configure Input SGE for Encap/Decap */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(sym_op->cipher.iv.data));
+ sge->length = sym_op->cipher.iv.length;
+ sge++;
+
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+ DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
+ sym_op->m_src->data_off);
+ sge->length = sym_op->auth.data.length;
+ if (sess->dir == DIR_DEC) {
+ sge++;
+ old_icv = (uint8_t *)(sge + 1);
+ memcpy(old_icv, sym_op->auth.digest.data,
+ sym_op->auth.digest.length);
+ memset(sym_op->auth.digest.data, 0, sym_op->auth.digest.length);
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
+ sge->length = sym_op->auth.digest.length;
+ DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
+ sym_op->auth.digest.length +
+ sym_op->cipher.iv.length));
+ }
+ DPAA2_SET_FLE_FIN(sge);
+ if (auth_only_len) {
+ DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
+ DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
+ }
+ return 0;
+}
+
+static inline int
+build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
+ struct qbman_fd *fd, uint16_t bpid)
+{
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct qbman_fle *fle, *sge;
+ uint32_t mem_len = (sess->dir == DIR_ENC) ?
+ (3 * sizeof(struct qbman_fle)) :
+ (5 * sizeof(struct qbman_fle) +
+ sym_op->auth.digest.length);
+ struct sec_flow_context *flc;
+ struct ctxt_priv *priv = sess->ctxt;
+ uint8_t *old_digest;
+
+ PMD_INIT_FUNC_TRACE();
+
+ fle = rte_zmalloc(NULL, mem_len, RTE_CACHE_LINE_SIZE);
+ if (!fle) {
+ RTE_LOG(ERR, PMD, "Memory alloc failed for FLE\n");
+ return -1;
+ }
+	/* TODO: we are using the first FLE entry to store the mbuf.
+	 * Currently we do not know which FLE has the mbuf stored,
+	 * so while retrieving we go back one FLE from the FD address
+	 * to get the mbuf address from the previous FLE.
+	 * A better approach would be to use the inline mbuf.
+	 */
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
+ fle = fle + 1;
+
+ if (likely(bpid < MAX_BPID)) {
+ DPAA2_SET_FD_BPID(fd, bpid);
+ DPAA2_SET_FLE_BPID(fle, bpid);
+ DPAA2_SET_FLE_BPID(fle + 1, bpid);
+ } else {
+ DPAA2_SET_FD_IVP(fd);
+ DPAA2_SET_FLE_IVP(fle);
+ DPAA2_SET_FLE_IVP((fle + 1));
+ }
+ flc = &priv->flc_desc[DESC_INITFINAL].flc;
+ DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
+ fle->length = sym_op->auth.digest.length;
+
+ DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
+ DPAA2_SET_FD_COMPOUND_FMT(fd);
+ fle++;
+
+ if (sess->dir == DIR_ENC) {
+ DPAA2_SET_FLE_ADDR(fle,
+ DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+ DPAA2_SET_FLE_OFFSET(fle, sym_op->auth.data.offset +
+ sym_op->m_src->data_off);
+ DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length);
+ fle->length = sym_op->auth.data.length;
+ } else {
+ sge = fle + 2;
+ DPAA2_SET_FLE_SG_EXT(fle);
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
+
+ if (likely(bpid < MAX_BPID)) {
+ DPAA2_SET_FLE_BPID(sge, bpid);
+ DPAA2_SET_FLE_BPID(sge + 1, bpid);
+ } else {
+ DPAA2_SET_FLE_IVP(sge);
+ DPAA2_SET_FLE_IVP((sge + 1));
+ }
+ DPAA2_SET_FLE_ADDR(sge,
+ DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+ DPAA2_SET_FLE_OFFSET(sge, sym_op->auth.data.offset +
+ sym_op->m_src->data_off);
+
+ DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length +
+ sym_op->auth.digest.length);
+ sge->length = sym_op->auth.data.length;
+ sge++;
+ old_digest = (uint8_t *)(sge + 1);
+ rte_memcpy(old_digest, sym_op->auth.digest.data,
+ sym_op->auth.digest.length);
+ memset(sym_op->auth.digest.data, 0, sym_op->auth.digest.length);
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
+ sge->length = sym_op->auth.digest.length;
+ fle->length = sym_op->auth.data.length +
+ sym_op->auth.digest.length;
+ DPAA2_SET_FLE_FIN(sge);
+ }
+ DPAA2_SET_FLE_FIN(fle);
+
+ return 0;
+}
+
+static int
+build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
+ struct qbman_fd *fd, uint16_t bpid)
+{
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct qbman_fle *fle, *sge;
+ uint32_t mem_len = (5 * sizeof(struct qbman_fle));
+ struct sec_flow_context *flc;
+ struct ctxt_priv *priv = sess->ctxt;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* todo - we can use some mempool to avoid malloc here */
+ fle = rte_zmalloc(NULL, mem_len, RTE_CACHE_LINE_SIZE);
+ if (!fle) {
+ RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
+ return -1;
+ }
+	/* TODO: we are using the first FLE entry to store the mbuf.
+	 * Currently we do not know which FLE has the mbuf stored,
+	 * so while retrieving we go back one FLE from the FD address
+	 * to get the mbuf address from the previous FLE.
+	 * A better approach would be to use the inline mbuf.
+	 */
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
+ fle = fle + 1;
+ sge = fle + 2;
+
+ if (likely(bpid < MAX_BPID)) {
+ DPAA2_SET_FD_BPID(fd, bpid);
+ DPAA2_SET_FLE_BPID(fle, bpid);
+ DPAA2_SET_FLE_BPID(fle + 1, bpid);
+ DPAA2_SET_FLE_BPID(sge, bpid);
+ DPAA2_SET_FLE_BPID(sge + 1, bpid);
+ } else {
+ DPAA2_SET_FD_IVP(fd);
+ DPAA2_SET_FLE_IVP(fle);
+ DPAA2_SET_FLE_IVP((fle + 1));
+ DPAA2_SET_FLE_IVP(sge);
+ DPAA2_SET_FLE_IVP((sge + 1));
+ }
+
+ flc = &priv->flc_desc[0].flc;
+ DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
+ DPAA2_SET_FD_LEN(fd, sym_op->cipher.data.length +
+ sym_op->cipher.iv.length);
+ DPAA2_SET_FD_COMPOUND_FMT(fd);
+ DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+ PMD_TX_LOG(DEBUG, "cipher_off: 0x%x/length %d,ivlen=%d data_off: 0x%x",
+ sym_op->cipher.data.offset,
+ sym_op->cipher.data.length,
+ sym_op->cipher.iv.length,
+ sym_op->m_src->data_off);
+
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+ DPAA2_SET_FLE_OFFSET(fle, sym_op->cipher.data.offset +
+ sym_op->m_src->data_off);
+
+ fle->length = sym_op->cipher.data.length + sym_op->cipher.iv.length;
+
+ PMD_TX_LOG(DEBUG, "1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d",
+ flc, fle, fle->addr_hi, fle->addr_lo, fle->length);
+
+ fle++;
+
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
+ fle->length = sym_op->cipher.data.length + sym_op->cipher.iv.length;
+
+ DPAA2_SET_FLE_SG_EXT(fle);
+
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(sym_op->cipher.iv.data));
+ sge->length = sym_op->cipher.iv.length;
+
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+ DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
+ sym_op->m_src->data_off);
+
+ sge->length = sym_op->cipher.data.length;
+ DPAA2_SET_FLE_FIN(sge);
+ DPAA2_SET_FLE_FIN(fle);
+
+ PMD_TX_LOG(DEBUG, "fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
+ (void *)DPAA2_GET_FD_ADDR(fd),
+ DPAA2_GET_FD_BPID(fd),
+ rte_dpaa2_bpid_info[bpid].meta_data_size,
+ DPAA2_GET_FD_OFFSET(fd),
+ DPAA2_GET_FD_LEN(fd));
+
+ return 0;
+}
+
+static inline int
+build_sec_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
+ struct qbman_fd *fd, uint16_t bpid)
+{
+ int ret = -1;
+
+ PMD_INIT_FUNC_TRACE();
+
+ switch (sess->ctxt_type) {
+ case DPAA2_SEC_CIPHER:
+ ret = build_cipher_fd(sess, op, fd, bpid);
+ break;
+ case DPAA2_SEC_AUTH:
+ ret = build_auth_fd(sess, op, fd, bpid);
+ break;
+ case DPAA2_SEC_CIPHER_HASH:
+ ret = build_authenc_fd(sess, op, fd, bpid);
+ break;
+ case DPAA2_SEC_HASH_CIPHER:
+ default:
+ RTE_LOG(ERR, PMD, "error: Unsupported session\n");
+ }
+ return ret;
+}
+
+static uint16_t
+dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+	/* Function to transmit the frames to the given device and VQ */
+ uint32_t loop;
+ int32_t ret;
+ struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
+ uint32_t frames_to_send;
+ struct qbman_eq_desc eqdesc;
+ struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
+ struct qbman_swp *swp;
+ uint16_t num_tx = 0;
+ /*todo - need to support multiple buffer pools */
+ uint16_t bpid;
+ struct rte_mempool *mb_pool;
+ dpaa2_sec_session *sess;
+
+ if (unlikely(nb_ops == 0))
+ return 0;
+
+ if (ops[0]->sym->sess_type != RTE_CRYPTO_SYM_OP_WITH_SESSION) {
+ RTE_LOG(ERR, PMD, "sessionless crypto op not supported\n");
+ return 0;
+ }
+ /*Prepare enqueue descriptor*/
+ qbman_eq_desc_clear(&eqdesc);
+ qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
+ qbman_eq_desc_set_response(&eqdesc, 0, 0);
+ qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);
+
+ if (!DPAA2_PER_LCORE_SEC_DPIO) {
+ ret = dpaa2_affine_qbman_swp_sec();
+ if (ret) {
+ RTE_LOG(ERR, PMD, "Failure in affining portal\n");
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_SEC_PORTAL;
+
+ while (nb_ops) {
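+		/* Process the ops in bursts of at most MAX_TX_RING_SLOTS
+		 * frames; (nb_ops >> 3) is non-zero once at least 8 ops
+		 * remain.
+		 */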
+ frames_to_send = (nb_ops >> 3) ? MAX_TX_RING_SLOTS : nb_ops;
+
+ for (loop = 0; loop < frames_to_send; loop++) {
+ /*Clear the unused FD fields before sending*/
+ memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
+ sess = (dpaa2_sec_session *)
+ (*ops)->sym->session->_private;
+ mb_pool = (*ops)->sym->m_src->pool;
+ bpid = mempool_to_bpid(mb_pool);
+ ret = build_sec_fd(sess, *ops, &fd_arr[loop], bpid);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "error: Improper packet"
+ " contents for crypto operation\n");
+ goto skip_tx;
+ }
+ ops++;
+ }
+ loop = 0;
+ while (loop < frames_to_send) {
+ loop += qbman_swp_send_multiple(swp, &eqdesc,
+ &fd_arr[loop],
+ frames_to_send - loop);
+ }
+
+ num_tx += frames_to_send;
+ nb_ops -= frames_to_send;
+ }
+skip_tx:
+ dpaa2_qp->tx_vq.tx_pkts += num_tx;
+ dpaa2_qp->tx_vq.err_pkts += nb_ops;
+ return num_tx;
+}
+
+static inline struct rte_crypto_op *
+sec_fd_to_mbuf(const struct qbman_fd *fd)
+{
+ struct qbman_fle *fle;
+ struct rte_crypto_op *op;
+
+ fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
+
+ PMD_RX_LOG(DEBUG, "FLE addr = %x - %x, offset = %x",
+ fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
+
+	/* We are using the first FLE entry to store the mbuf.
+	 * Currently we do not know which FLE has the mbuf stored,
+	 * so while retrieving we go back one FLE from the FD address
+	 * to get the mbuf address from the previous FLE.
+	 * A better approach would be to use the inline mbuf.
+	 */
+
+ if (unlikely(DPAA2_GET_FD_IVP(fd))) {
+ /* TODO complete it. */
+ RTE_LOG(ERR, PMD, "error: Non inline buffer - WHAT to DO?");
+ return NULL;
+ }
+ op = (struct rte_crypto_op *)DPAA2_IOVA_TO_VADDR(
+ DPAA2_GET_FLE_ADDR((fle - 1)));
+
+	/* Prefetch op */
+ rte_prefetch0(op->sym->m_src);
+
+ PMD_RX_LOG(DEBUG, "mbuf %p BMAN buf addr %p",
+ (void *)op->sym->m_src, op->sym->m_src->buf_addr);
+
+ PMD_RX_LOG(DEBUG, "fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
+ (void *)DPAA2_GET_FD_ADDR(fd),
+ DPAA2_GET_FD_BPID(fd),
+ rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
+ DPAA2_GET_FD_OFFSET(fd),
+ DPAA2_GET_FD_LEN(fd));
+
+ /* free the fle memory */
+ rte_free(fle - 1);
+
+ return op;
+}
+
+static uint16_t
+dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+	/* Function is responsible for receiving frames for a given device and VQ */
+ struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
+ struct qbman_result *dq_storage;
+ uint32_t fqid = dpaa2_qp->rx_vq.fqid;
+ int ret, num_rx = 0;
+ uint8_t is_last = 0, status;
+ struct qbman_swp *swp;
+ const struct qbman_fd *fd;
+ struct qbman_pull_desc pulldesc;
+
+ if (!DPAA2_PER_LCORE_SEC_DPIO) {
+ ret = dpaa2_affine_qbman_swp_sec();
+ if (ret) {
+ RTE_LOG(ERR, PMD, "Failure in affining portal\n");
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_SEC_PORTAL;
+ dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
+
+ qbman_pull_desc_clear(&pulldesc);
+ qbman_pull_desc_set_numframes(&pulldesc,
+ (nb_ops > DPAA2_DQRR_RING_SIZE) ?
+ DPAA2_DQRR_RING_SIZE : nb_ops);
+ qbman_pull_desc_set_fq(&pulldesc, fqid);
+ qbman_pull_desc_set_storage(&pulldesc, dq_storage,
+ (dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
+ 1);
+
+ /*Issue a volatile dequeue command. */
+ while (1) {
+ if (qbman_swp_pull(swp, &pulldesc)) {
+			RTE_LOG(WARNING, PMD, "SEC VDQ command is not issued. "
+				"QBMAN is busy\n");
+ /* Portal was busy, try again */
+ continue;
+ }
+ break;
+ };
+
+	/* Receive the packets till the Last Dequeue entry is found with
+	 * respect to the above issued PULL command.
+	 */
+ while (!is_last) {
+		/* Check if the previously issued command is completed.
+		 * Note that the SWP appears to be shared between the
+		 * Ethernet driver and the SEC driver.
+		 */
+ while (!qbman_check_command_complete(swp, dq_storage))
+ ;
+
+ /* Loop until the dq_storage is updated with
+ * new token by QBMAN
+ */
+ while (!qbman_result_has_new_result(swp, dq_storage))
+ ;
+		/* Check whether the last pull command has expired and
+		 * set the condition for loop termination.
+		 */
+ if (qbman_result_DQ_is_pull_complete(dq_storage)) {
+ is_last = 1;
+ /* Check for valid frame. */
+ status = (uint8_t)qbman_result_DQ_flags(dq_storage);
+ if (unlikely(
+ (status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
+ PMD_RX_LOG(DEBUG, "No frame is delivered");
+ continue;
+ }
+ }
+
+ fd = qbman_result_DQ_fd(dq_storage);
+ ops[num_rx] = sec_fd_to_mbuf(fd);
+
+ if (unlikely(fd->simple.frc)) {
+ /* TODO Parse SEC errors */
+ RTE_LOG(ERR, PMD, "SEC returned Error - %x\n",
+ fd->simple.frc);
+ ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ } else {
+ ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ }
+
+ num_rx++;
+ dq_storage++;
+ } /* End of Packet Rx loop */
+
+ dpaa2_qp->rx_vq.rx_pkts += num_rx;
+
+ PMD_RX_LOG(DEBUG, "SEC Received %d Packets", num_rx);
+	/* Return the total number of packets received to the DPAA2 app */
+ return num_rx;
+}
+
+/** Release queue pair */
+static int
+dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
+{
+ struct dpaa2_sec_qp *qp =
+ (struct dpaa2_sec_qp *)dev->data->queue_pairs[queue_pair_id];
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (qp->rx_vq.q_storage) {
+ dpaa2_free_dq_storage(qp->rx_vq.q_storage);
+ rte_free(qp->rx_vq.q_storage);
+ }
+ rte_free(qp);
+
+ dev->data->queue_pairs[queue_pair_id] = NULL;
+
+ return 0;
+}
+
+/** Setup a queue pair */
+static int
+dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
+ __rte_unused int socket_id)
+{
+ struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
+ struct dpaa2_sec_qp *qp;
+ struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
+ struct dpseci_rx_queue_cfg cfg;
+ int32_t retcode;
+
+ PMD_INIT_FUNC_TRACE();
+
+	/* If the qp is already set up there is nothing more to do. */
+ if (dev->data->queue_pairs[qp_id] != NULL) {
+ PMD_DRV_LOG(INFO, "QP already setup");
+ return 0;
+ }
+
+ PMD_DRV_LOG(DEBUG, "dev =%p, queue =%d, conf =%p",
+ dev, qp_id, qp_conf);
+
+ memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
+
+ qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
+ RTE_CACHE_LINE_SIZE);
+ if (!qp) {
+ RTE_LOG(ERR, PMD, "malloc failed for rx/tx queues\n");
+ return -1;
+ }
+
+ qp->rx_vq.dev = dev;
+ qp->tx_vq.dev = dev;
+ qp->rx_vq.q_storage = rte_malloc("sec dq storage",
+ sizeof(struct queue_storage_info_t),
+ RTE_CACHE_LINE_SIZE);
+ if (!qp->rx_vq.q_storage) {
+ RTE_LOG(ERR, PMD, "malloc failed for q_storage\n");
+ return -1;
+ }
+ memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));
+
+ if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
+ RTE_LOG(ERR, PMD, "dpaa2_alloc_dq_storage failed\n");
+ return -1;
+ }
+
+ dev->data->queue_pairs[qp_id] = qp;
+
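+	/* Register the rx_vq pointer as this queue's user context. */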
+ cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
+ cfg.user_ctx = (uint64_t)(&qp->rx_vq);
+ retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
+ qp_id, &cfg);
+ return retcode;
+}
+
+/** Start queue pair */
+static int
+dpaa2_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return 0;
+}
+
+/** Stop queue pair */
+static int
+dpaa2_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return 0;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return dev->data->nb_queue_pairs;
+}
+
+/** Return the size of the DPAA2 SEC session structure */
+static unsigned int
+dpaa2_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return sizeof(dpaa2_sec_session);
+}
+
+static void
+dpaa2_sec_session_initialize(struct rte_mempool *mp __rte_unused,
+ void *sess __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+}
+
+static int
+dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ dpaa2_sec_session *session)
+{
+ struct dpaa2_sec_cipher_ctxt *ctxt = &session->ext_params.cipher_ctxt;
+ struct alginfo cipherdata;
+ int bufsize, i;
+ struct ctxt_priv *priv;
+ struct sec_flow_context *flc;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* For SEC CIPHER only one descriptor is required. */
+ priv = (struct ctxt_priv *)rte_zmalloc(NULL,
+ sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
+ RTE_CACHE_LINE_SIZE);
+ if (priv == NULL) {
+ RTE_LOG(ERR, PMD, "No Memory for priv CTXT");
+ return -1;
+ }
+
+ flc = &priv->flc_desc[0].flc;
+
+ session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->cipher_key.data == NULL) {
+ RTE_LOG(ERR, PMD, "No Memory for cipher key");
+ rte_free(priv);
+ return -1;
+ }
+ session->cipher_key.length = xform->cipher.key.length;
+
+ memcpy(session->cipher_key.data, xform->cipher.key.data,
+ xform->cipher.key.length);
+ cipherdata.key = (uint64_t)session->cipher_key.data;
+ cipherdata.keylen = session->cipher_key.length;
+ cipherdata.key_enc_flags = 0;
+ cipherdata.key_type = RTA_DATA_IMM;
+
+ switch (xform->cipher.algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ cipherdata.algtype = OP_ALG_ALGSEL_AES;
+ cipherdata.algmode = OP_ALG_AAI_CBC;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
+ ctxt->iv.length = AES_CBC_IV_LEN;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ cipherdata.algtype = OP_ALG_ALGSEL_3DES;
+ cipherdata.algmode = OP_ALG_AAI_CBC;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
+ ctxt->iv.length = TDES_CBC_IV_LEN;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ case RTE_CRYPTO_CIPHER_3DES_CTR:
+ case RTE_CRYPTO_CIPHER_AES_GCM:
+ case RTE_CRYPTO_CIPHER_AES_CCM:
+ case RTE_CRYPTO_CIPHER_AES_ECB:
+ case RTE_CRYPTO_CIPHER_3DES_ECB:
+ case RTE_CRYPTO_CIPHER_AES_XTS:
+ case RTE_CRYPTO_CIPHER_AES_F8:
+ case RTE_CRYPTO_CIPHER_ARC4:
+ case RTE_CRYPTO_CIPHER_KASUMI_F8:
+ case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+ case RTE_CRYPTO_CIPHER_ZUC_EEA3:
+ case RTE_CRYPTO_CIPHER_NULL:
+ RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u",
+ xform->cipher.algo);
+ goto error_out;
+ default:
+ RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
+ xform->cipher.algo);
+ goto error_out;
+ }
+ session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+ DIR_ENC : DIR_DEC;
+
+ bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
+ &cipherdata, NULL, ctxt->iv.length,
+ session->dir);
+ if (bufsize < 0) {
+ RTE_LOG(ERR, PMD, "Crypto: Descriptor build failed\n");
+ goto error_out;
+ }
+ flc->dhr = 0;
+ flc->bpv0 = 0x1;
+ flc->mode_bits = 0x8000;
+
+ flc->word1_sdl = (uint8_t)bufsize;
+ flc->word2_rflc_31_0 = lower_32_bits(
+ (uint64_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+ flc->word3_rflc_63_32 = upper_32_bits(
+ (uint64_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+ session->ctxt = priv;
+
+ for (i = 0; i < bufsize; i++)
+ PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
+ i, priv->flc_desc[0].desc[i]);
+
+ return 0;
+
+error_out:
+ rte_free(session->cipher_key.data);
+ rte_free(priv);
+ return -1;
+}
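Editor's note: a minimal sketch (not part of the patch) of the cipher-only transform that dpaa2_sec_cipher_init() consumes; the key buffer and length are caller-supplied placeholders.

#include <string.h>
#include <rte_crypto_sym.h>

static struct rte_crypto_sym_xform
make_aes_cbc_xform(uint8_t *key, size_t keylen)
{
	struct rte_crypto_sym_xform xform;

	memset(&xform, 0, sizeof(xform));
	xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	xform.next = NULL;			/* cipher-only chain */
	xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
	xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	xform.cipher.key.data = key;		/* caller-owned key buffer */
	xform.cipher.key.length = keylen;	/* e.g. 16 for AES-128 */
	return xform;
}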
+
+static int
+dpaa2_sec_auth_init(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ dpaa2_sec_session *session)
+{
+ struct dpaa2_sec_auth_ctxt *ctxt = &session->ext_params.auth_ctxt;
+ struct alginfo authdata;
+	int bufsize;
+ struct ctxt_priv *priv;
+ struct sec_flow_context *flc;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* For SEC AUTH three descriptors are required for various stages */
+ priv = (struct ctxt_priv *)rte_zmalloc(NULL,
+ sizeof(struct ctxt_priv) + 3 *
+ sizeof(struct sec_flc_desc),
+ RTE_CACHE_LINE_SIZE);
+ if (priv == NULL) {
+ RTE_LOG(ERR, PMD, "No Memory for priv CTXT");
+ return -1;
+ }
+
+ flc = &priv->flc_desc[DESC_INITFINAL].flc;
+
+ session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->auth_key.data == NULL) {
+ RTE_LOG(ERR, PMD, "No Memory for auth key");
+ rte_free(priv);
+ return -1;
+ }
+ session->auth_key.length = xform->auth.key.length;
+
+ memcpy(session->auth_key.data, xform->auth.key.data,
+ xform->auth.key.length);
+ authdata.key = (uint64_t)session->auth_key.data;
+ authdata.keylen = session->auth_key.length;
+ authdata.key_enc_flags = 0;
+ authdata.key_type = RTA_DATA_IMM;
+
+ switch (xform->auth.algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA1;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_MD5;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA256;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA384;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA512;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA224;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+ case RTE_CRYPTO_AUTH_AES_GCM:
+ case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+ case RTE_CRYPTO_AUTH_NULL:
+ case RTE_CRYPTO_AUTH_SHA1:
+ case RTE_CRYPTO_AUTH_SHA256:
+ case RTE_CRYPTO_AUTH_SHA512:
+ case RTE_CRYPTO_AUTH_SHA224:
+ case RTE_CRYPTO_AUTH_SHA384:
+ case RTE_CRYPTO_AUTH_MD5:
+ case RTE_CRYPTO_AUTH_AES_CCM:
+ case RTE_CRYPTO_AUTH_AES_GMAC:
+ case RTE_CRYPTO_AUTH_KASUMI_F9:
+ case RTE_CRYPTO_AUTH_AES_CMAC:
+ case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+ case RTE_CRYPTO_AUTH_ZUC_EIA3:
+ RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u",
+ xform->auth.algo);
+ goto error_out;
+ default:
+ RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
+ xform->auth.algo);
+ goto error_out;
+ }
+ session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
+ DIR_ENC : DIR_DEC;
+
+	bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
+				   1, 0, &authdata, !session->dir,
+				   ctxt->trunc_len);
+	if (bufsize < 0) {
+		RTE_LOG(ERR, PMD, "Crypto: Descriptor build failed\n");
+		goto error_out;
+	}
+
+ flc->word1_sdl = (uint8_t)bufsize;
+ flc->word2_rflc_31_0 = lower_32_bits(
+ (uint64_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+ flc->word3_rflc_63_32 = upper_32_bits(
+ (uint64_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+ session->ctxt = priv;
+
+ return 0;
+
+error_out:
+ rte_free(session->auth_key.data);
+ rte_free(priv);
+ return -1;
+}
+
+static int
+dpaa2_sec_aead_init(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ dpaa2_sec_session *session)
+{
+ struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
+ struct alginfo authdata, cipherdata;
+	int bufsize;
+ struct ctxt_priv *priv;
+ struct sec_flow_context *flc;
+ struct rte_crypto_cipher_xform *cipher_xform;
+ struct rte_crypto_auth_xform *auth_xform;
+ int err;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (session->ext_params.aead_ctxt.auth_cipher_text) {
+ cipher_xform = &xform->cipher;
+ auth_xform = &xform->next->auth;
+ session->ctxt_type =
+ (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+ DPAA2_SEC_CIPHER_HASH : DPAA2_SEC_HASH_CIPHER;
+ } else {
+ cipher_xform = &xform->next->cipher;
+ auth_xform = &xform->auth;
+ session->ctxt_type =
+ (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+ DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
+ }
+ /* For SEC AEAD only one descriptor is required */
+ priv = (struct ctxt_priv *)rte_zmalloc(NULL,
+ sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
+ RTE_CACHE_LINE_SIZE);
+ if (priv == NULL) {
+ RTE_LOG(ERR, PMD, "No Memory for priv CTXT");
+ return -1;
+ }
+
+ flc = &priv->flc_desc[0].flc;
+
+ session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
+ RTE_LOG(ERR, PMD, "No Memory for cipher key");
+ rte_free(priv);
+ return -1;
+ }
+ session->cipher_key.length = cipher_xform->key.length;
+ session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
+ RTE_LOG(ERR, PMD, "No Memory for auth key");
+ rte_free(session->cipher_key.data);
+ rte_free(priv);
+ return -1;
+ }
+ session->auth_key.length = auth_xform->key.length;
+ memcpy(session->cipher_key.data, cipher_xform->key.data,
+ cipher_xform->key.length);
+ memcpy(session->auth_key.data, auth_xform->key.data,
+ auth_xform->key.length);
+
+ ctxt->trunc_len = auth_xform->digest_length;
+ authdata.key = (uint64_t)session->auth_key.data;
+ authdata.keylen = session->auth_key.length;
+ authdata.key_enc_flags = 0;
+ authdata.key_type = RTA_DATA_IMM;
+
+ switch (auth_xform->algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA1;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_MD5;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA224;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA256;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA384;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ authdata.algtype = OP_ALG_ALGSEL_SHA512;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+ case RTE_CRYPTO_AUTH_AES_GCM:
+ case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+ case RTE_CRYPTO_AUTH_NULL:
+ case RTE_CRYPTO_AUTH_SHA1:
+ case RTE_CRYPTO_AUTH_SHA256:
+ case RTE_CRYPTO_AUTH_SHA512:
+ case RTE_CRYPTO_AUTH_SHA224:
+ case RTE_CRYPTO_AUTH_SHA384:
+ case RTE_CRYPTO_AUTH_MD5:
+ case RTE_CRYPTO_AUTH_AES_CCM:
+ case RTE_CRYPTO_AUTH_AES_GMAC:
+ case RTE_CRYPTO_AUTH_KASUMI_F9:
+ case RTE_CRYPTO_AUTH_AES_CMAC:
+ case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+ case RTE_CRYPTO_AUTH_ZUC_EIA3:
+ RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u",
+ auth_xform->algo);
+ goto error_out;
+ default:
+ RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
+ auth_xform->algo);
+ goto error_out;
+ }
+ cipherdata.key = (uint64_t)session->cipher_key.data;
+ cipherdata.keylen = session->cipher_key.length;
+ cipherdata.key_enc_flags = 0;
+ cipherdata.key_type = RTA_DATA_IMM;
+
+ switch (cipher_xform->algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ cipherdata.algtype = OP_ALG_ALGSEL_AES;
+ cipherdata.algmode = OP_ALG_AAI_CBC;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
+ ctxt->iv.length = AES_CBC_IV_LEN;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ cipherdata.algtype = OP_ALG_ALGSEL_3DES;
+ cipherdata.algmode = OP_ALG_AAI_CBC;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
+ ctxt->iv.length = TDES_CBC_IV_LEN;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_GCM:
+ case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+ case RTE_CRYPTO_CIPHER_NULL:
+ case RTE_CRYPTO_CIPHER_3DES_ECB:
+ case RTE_CRYPTO_CIPHER_AES_ECB:
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ case RTE_CRYPTO_CIPHER_AES_CCM:
+ case RTE_CRYPTO_CIPHER_KASUMI_F8:
+ RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u",
+ cipher_xform->algo);
+ goto error_out;
+ default:
+ RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
+ cipher_xform->algo);
+ goto error_out;
+ }
+ session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+ DIR_ENC : DIR_DEC;
+
+ priv->flc_desc[0].desc[0] = cipherdata.keylen;
+ priv->flc_desc[0].desc[1] = authdata.keylen;
+ err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
+ MIN_JOB_DESC_SIZE,
+ (unsigned int *)priv->flc_desc[0].desc,
+ &priv->flc_desc[0].desc[2], 2);
+
+ if (err < 0) {
+ PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths");
+ goto error_out;
+ }
+ if (priv->flc_desc[0].desc[2] & 1) {
+ cipherdata.key_type = RTA_DATA_IMM;
+ } else {
+ cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
+ cipherdata.key_type = RTA_DATA_PTR;
+ }
+ if (priv->flc_desc[0].desc[2] & (1 << 1)) {
+ authdata.key_type = RTA_DATA_IMM;
+ } else {
+ authdata.key = DPAA2_VADDR_TO_IOVA(authdata.key);
+ authdata.key_type = RTA_DATA_PTR;
+ }
+ priv->flc_desc[0].desc[0] = 0;
+ priv->flc_desc[0].desc[1] = 0;
+ priv->flc_desc[0].desc[2] = 0;
+
+ if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
+ bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
+ 0, &cipherdata, &authdata,
+ ctxt->iv.length,
+ ctxt->auth_only_len,
+ ctxt->trunc_len,
+ session->dir);
+ } else {
+ RTE_LOG(ERR, PMD, "Hash before cipher not supported");
+ goto error_out;
+	}
+	if (bufsize < 0) {
+		RTE_LOG(ERR, PMD, "Crypto: Descriptor build failed\n");
+		goto error_out;
+	}
+
+ flc->word1_sdl = (uint8_t)bufsize;
+ flc->word2_rflc_31_0 = lower_32_bits(
+ (uint64_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+ flc->word3_rflc_63_32 = upper_32_bits(
+ (uint64_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+ session->ctxt = priv;
+
+ return 0;
+
+error_out:
+ rte_free(session->cipher_key.data);
+ rte_free(session->auth_key.data);
+ rte_free(priv);
+ return -1;
+}
+
+static void *
+dpaa2_sec_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform, void *sess)
+{
+ dpaa2_sec_session *session = sess;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (unlikely(sess == NULL)) {
+ RTE_LOG(ERR, PMD, "invalid session struct");
+ return NULL;
+ }
+ /* Cipher Only */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
+ session->ctxt_type = DPAA2_SEC_CIPHER;
+ dpaa2_sec_cipher_init(dev, xform, session);
+
+ /* Authentication Only */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next == NULL) {
+ session->ctxt_type = DPAA2_SEC_AUTH;
+ dpaa2_sec_auth_init(dev, xform, session);
+
+ /* Cipher then Authenticate */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ session->ext_params.aead_ctxt.auth_cipher_text = true;
+ dpaa2_sec_aead_init(dev, xform, session);
+
+ /* Authenticate then Cipher */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ session->ext_params.aead_ctxt.auth_cipher_text = false;
+ dpaa2_sec_aead_init(dev, xform, session);
+ } else {
+ RTE_LOG(ERR, PMD, "Invalid crypto type");
+ return NULL;
+ }
+
+ return session;
+}
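Editor's note: a hedged sketch (not part of the patch) of a cipher-then-auth transform chain of the shape that dpaa2_sec_session_configure() routes to dpaa2_sec_aead_init() with auth_cipher_text = true. Keys, key lengths and the digest length are placeholders.

#include <string.h>
#include <rte_crypto_sym.h>

static void
build_cipher_then_auth(struct rte_crypto_sym_xform *cipher,
		       struct rte_crypto_sym_xform *auth,
		       uint8_t *ckey, size_t ckey_len,
		       uint8_t *akey, size_t akey_len)
{
	memset(cipher, 0, sizeof(*cipher));
	memset(auth, 0, sizeof(*auth));

	cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	cipher->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
	cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	cipher->cipher.key.data = ckey;
	cipher->cipher.key.length = ckey_len;
	cipher->next = auth;		/* cipher first, then authentication */

	auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	auth->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
	auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
	auth->auth.key.data = akey;
	auth->auth.key.length = akey_len;
	auth->auth.digest_length = 20;	/* placeholder: full SHA-1 digest */
	auth->next = NULL;
}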
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+dpaa2_sec_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
+{
+	dpaa2_sec_session *s = (dpaa2_sec_session *)sess;
+
+	PMD_INIT_FUNC_TRACE();
+
+ if (s) {
+ rte_free(s->ctxt);
+ rte_free(s->cipher_key.data);
+ rte_free(s->auth_key.data);
+ memset(sess, 0, sizeof(dpaa2_sec_session));
+ }
+}
+
+static int
+dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
+ struct rte_cryptodev_config *config __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return -ENOTSUP;
+}
+
+static int
+dpaa2_sec_dev_start(struct rte_cryptodev *dev)
+{
+ struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
+ struct dpseci_attr attr;
+ struct dpaa2_queue *dpaa2_q;
+ struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
+ dev->data->queue_pairs;
+ struct dpseci_rx_queue_attr rx_attr;
+ struct dpseci_tx_queue_attr tx_attr;
+ int ret, i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ memset(&attr, 0, sizeof(struct dpseci_attr));
+
+ ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "DPSECI with HW_ID = %d ENABLE FAILED\n",
+ priv->hw_id);
+ goto get_attr_failure;
+ }
+ ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
+ if (ret) {
+ PMD_INIT_LOG(ERR,
+ "DPSEC ATTRIBUTE READ FAILED, disabling DPSEC\n");
+ goto get_attr_failure;
+ }
+ for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
+ dpaa2_q = &qp[i]->rx_vq;
+ dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
+ &rx_attr);
+ dpaa2_q->fqid = rx_attr.fqid;
+ PMD_INIT_LOG(DEBUG, "rx_fqid: %d", dpaa2_q->fqid);
+ }
+ for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
+ dpaa2_q = &qp[i]->tx_vq;
+ dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
+ &tx_attr);
+ dpaa2_q->fqid = tx_attr.fqid;
+ PMD_INIT_LOG(DEBUG, "tx_fqid: %d", dpaa2_q->fqid);
+ }
+
+ return 0;
+get_attr_failure:
+ dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
+ return -1;
+}
+
+static void
+dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
+{
+ struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failure in disabling dpseci %d device",
+ priv->hw_id);
+ return;
+ }
+
+ ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
+ if (ret < 0) {
+		PMD_INIT_LOG(ERR, "SEC Device cannot be reset: Error = %x\n",
+ ret);
+ return;
+ }
+}
+
+static int
+dpaa2_sec_dev_close(struct rte_cryptodev *dev)
+{
+ struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Function is reverse of dpaa2_sec_dev_init.
+ * It does the following:
+ * 1. Detach a DPSECI from attached resources i.e. buffer pools, dpbp_id
+ * 2. Close the DPSECI device
+ * 3. Free the allocated resources.
+ */
+
+	/* Close the device at the underlying layer */
+ ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failure closing dpseci device with"
+ " error code %d\n", ret);
+ return -1;
+ }
+
+	/* Free the memory allocated for the dpseci object */
+ priv->hw = NULL;
+ free(dpseci);
+
+ return 0;
+}
+
+static void
+dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *info)
+{
+ struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+ if (info != NULL) {
+ info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
+ info->feature_flags = dev->feature_flags;
+ info->capabilities = dpaa2_sec_capabilities;
+ info->sym.max_nb_sessions = internals->max_nb_sessions;
+ info->dev_type = RTE_CRYPTODEV_DPAA2_SEC_PMD;
+ }
+}
+
+static
+void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
+ struct dpseci_sec_counters counters = {0};
+ struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
+ dev->data->queue_pairs;
+ int ret, i;
+
+ PMD_INIT_FUNC_TRACE();
+ if (stats == NULL) {
+ PMD_DRV_LOG(ERR, "invalid stats ptr NULL");
+ return;
+ }
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ if (qp[i] == NULL) {
+ PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
+ continue;
+ }
+
+ stats->enqueued_count += qp[i]->tx_vq.tx_pkts;
+ stats->dequeued_count += qp[i]->rx_vq.rx_pkts;
+ stats->enqueue_err_count += qp[i]->tx_vq.err_pkts;
+ stats->dequeue_err_count += qp[i]->rx_vq.err_pkts;
+ }
+
+ ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token,
+ &counters);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "dpseci_get_sec_counters failed\n");
+ } else {
+ PMD_DRV_LOG(INFO, "dpseci hw stats:"
+ "\n\tNumber of Requests Dequeued = %lu"
+ "\n\tNumber of Outbound Encrypt Requests = %lu"
+ "\n\tNumber of Inbound Decrypt Requests = %lu"
+ "\n\tNumber of Outbound Bytes Encrypted = %lu"
+ "\n\tNumber of Outbound Bytes Protected = %lu"
+ "\n\tNumber of Inbound Bytes Decrypted = %lu"
+ "\n\tNumber of Inbound Bytes Validated = %lu",
+ counters.dequeued_requests,
+ counters.ob_enc_requests,
+ counters.ib_dec_requests,
+ counters.ob_enc_bytes,
+ counters.ob_prot_bytes,
+ counters.ib_dec_bytes,
+ counters.ib_valid_bytes);
+ }
+}
+
+static
+void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
+{
+ int i;
+ struct dpaa2_sec_qp **qp = (struct dpaa2_sec_qp **)
+ (dev->data->queue_pairs);
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ if (qp[i] == NULL) {
+ PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
+ continue;
+ }
+ qp[i]->tx_vq.rx_pkts = 0;
+ qp[i]->tx_vq.tx_pkts = 0;
+ qp[i]->tx_vq.err_pkts = 0;
+ qp[i]->rx_vq.rx_pkts = 0;
+ qp[i]->rx_vq.tx_pkts = 0;
+ qp[i]->rx_vq.err_pkts = 0;
+ }
+}
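Editor's note: a small sketch (not part of the patch) showing how the aggregated queue counters above surface to an application; the device id is a placeholder and the rte_cryptodev_stats_get() prototype is assumed to match this DPDK snapshot.

#include <inttypes.h>
#include <stdio.h>
#include <rte_cryptodev.h>

static void
print_sec_stats(uint8_t dev_id)
{
	struct rte_cryptodev_stats stats;

	/* Fills stats via the PMD's .stats_get callback shown above. */
	if (rte_cryptodev_stats_get(dev_id, &stats) == 0)
		printf("enqueued %" PRIu64 ", dequeued %" PRIu64 "\n",
		       stats.enqueued_count, stats.dequeued_count);
}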
+
+static struct rte_cryptodev_ops crypto_ops = {
+ .dev_configure = dpaa2_sec_dev_configure,
+ .dev_start = dpaa2_sec_dev_start,
+ .dev_stop = dpaa2_sec_dev_stop,
+ .dev_close = dpaa2_sec_dev_close,
+ .dev_infos_get = dpaa2_sec_dev_infos_get,
+ .stats_get = dpaa2_sec_stats_get,
+ .stats_reset = dpaa2_sec_stats_reset,
+ .queue_pair_setup = dpaa2_sec_queue_pair_setup,
+ .queue_pair_release = dpaa2_sec_queue_pair_release,
+ .queue_pair_start = dpaa2_sec_queue_pair_start,
+ .queue_pair_stop = dpaa2_sec_queue_pair_stop,
+ .queue_pair_count = dpaa2_sec_queue_pair_count,
+ .session_get_size = dpaa2_sec_session_get_size,
+ .session_initialize = dpaa2_sec_session_initialize,
+ .session_configure = dpaa2_sec_session_configure,
+ .session_clear = dpaa2_sec_session_clear,
+};
+
+static int
+dpaa2_sec_uninit(const struct rte_cryptodev_driver *crypto_drv __rte_unused,
+ struct rte_cryptodev *dev)
+{
+ PMD_INIT_LOG(INFO, "Closing DPAA2_SEC device %s on numa socket %u\n",
+ dev->data->name, rte_socket_id());
+
+ return 0;
+}
+
+static int
+dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
+{
+ struct dpaa2_sec_dev_private *internals;
+ struct rte_device *dev = cryptodev->device;
+ struct rte_dpaa2_device *dpaa2_dev;
+ struct fsl_mc_io *dpseci;
+ uint16_t token;
+ struct dpseci_attr attr;
+ int retcode, hw_id;
+
+ PMD_INIT_FUNC_TRACE();
+ dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
+ if (dpaa2_dev == NULL) {
+ PMD_INIT_LOG(ERR, "dpaa2_device not found\n");
+ return -1;
+ }
+ hw_id = dpaa2_dev->object_id;
+
+ cryptodev->dev_type = RTE_CRYPTODEV_DPAA2_SEC_PMD;
+ cryptodev->dev_ops = &crypto_ops;
+
+ cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
+ cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
+ cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_HW_ACCELERATED |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+
+ internals = cryptodev->data->dev_private;
+ internals->max_nb_sessions = RTE_DPAA2_SEC_PMD_MAX_NB_SESSIONS;
+
+	/*
+	 * For secondary processes, we don't initialise any further as the
+	 * primary has already done this work. Only check that we don't need
+	 * different enqueue/dequeue functions.
+	 */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ PMD_INIT_LOG(DEBUG, "Device already init by primary process");
+ return 0;
+ }
+	/* Open the DPSECI object via MC and save the handle for further use */
+ dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
+ sizeof(struct fsl_mc_io), 0);
+ if (!dpseci) {
+ PMD_INIT_LOG(ERR,
+ "Error in allocating the memory for dpsec object");
+ return -1;
+ }
+ dpseci->regs = rte_mcp_ptr_list[0];
+
+ retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
+ if (retcode != 0) {
+ PMD_INIT_LOG(ERR, "Cannot open the dpsec device: Error = %x",
+ retcode);
+ goto init_error;
+ }
+ retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
+ if (retcode != 0) {
+ PMD_INIT_LOG(ERR,
+			     "Cannot get dpsec device attributes: Error = %x",
+ retcode);
+ goto init_error;
+ }
+ sprintf(cryptodev->data->name, "dpsec-%u", hw_id);
+
+ internals->max_nb_queue_pairs = attr.num_tx_queues;
+ cryptodev->data->nb_queue_pairs = internals->max_nb_queue_pairs;
+ internals->hw = dpseci;
+ internals->token = token;
+
+ PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name);
+ return 0;
+
+init_error:
+ PMD_INIT_LOG(ERR, "driver %s: create failed\n", cryptodev->data->name);
+
+ /* dpaa2_sec_uninit(crypto_dev_name); */
+ return -EFAULT;
+}
+
+static int
+cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv,
+ struct rte_dpaa2_device *dpaa2_dev)
+{
+ struct rte_cryptodev *cryptodev;
+ char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+
+ int retval;
+
+ sprintf(cryptodev_name, "dpsec-%d", dpaa2_dev->object_id);
+
+ cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
+ if (cryptodev == NULL)
+ return -ENOMEM;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ cryptodev->data->dev_private = rte_zmalloc_socket(
+ "cryptodev private structure",
+ sizeof(struct dpaa2_sec_dev_private),
+ RTE_CACHE_LINE_SIZE,
+ rte_socket_id());
+
+ if (cryptodev->data->dev_private == NULL)
+ rte_panic("Cannot allocate memzone for private "
+ "device data");
+ }
+
+ dpaa2_dev->cryptodev = cryptodev;
+ cryptodev->device = &dpaa2_dev->device;
+ cryptodev->driver = (struct rte_cryptodev_driver *)dpaa2_drv;
+
+ /* init user callbacks */
+ TAILQ_INIT(&(cryptodev->link_intr_cbs));
+
+ /* Invoke PMD device initialization function */
+ retval = dpaa2_sec_dev_init(cryptodev);
+ if (retval == 0)
+ return 0;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(cryptodev->data->dev_private);
+
+ cryptodev->attached = RTE_CRYPTODEV_DETACHED;
+
+ return -ENXIO;
+}
+
+static int
+cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
+{
+ struct rte_cryptodev *cryptodev;
+ int ret;
+
+ cryptodev = dpaa2_dev->cryptodev;
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ ret = dpaa2_sec_uninit(NULL, cryptodev);
+ if (ret)
+ return ret;
+
+ /* free crypto device */
+ rte_cryptodev_pmd_release_device(cryptodev);
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(cryptodev->data->dev_private);
+
+ cryptodev->device = NULL;
+ cryptodev->driver = NULL;
+ cryptodev->data = NULL;
+
+ return 0;
+}
+
+static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
+ .drv_type = DPAA2_MC_DPSECI_DEVID,
+ .driver = {
+ .name = "DPAA2 SEC PMD"
+ },
+ .probe = cryptodev_dpaa2_sec_probe,
+ .remove = cryptodev_dpaa2_sec_remove,
+};
+
+RTE_PMD_REGISTER_DPAA2(dpaa2_sec_pmd, rte_dpaa2_sec_driver);
diff --git a/src/seastar/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h
new file mode 100644
index 00000000..03d4c703
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h
@@ -0,0 +1,70 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (c) 2016 NXP. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Freescale Semiconductor, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _DPAA2_SEC_LOGS_H_
+#define _DPAA2_SEC_LOGS_H_
+
+#define PMD_INIT_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ##args)
+
+#ifdef RTE_LIBRTE_DPAA2_SEC_DEBUG_INIT
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+#else
+#define PMD_INIT_FUNC_TRACE() do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_DPAA2_SEC_DEBUG_RX
+#define PMD_RX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_RX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_DPAA2_SEC_DEBUG_TX
+#define PMD_TX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_DPAA2_SEC_DEBUG_DRIVER
+#define PMD_DRV_LOG_RAW(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
+#else
+#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0)
+#endif
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+ PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
+
+#endif /* _DPAA2_SEC_LOGS_H_ */
diff --git a/src/seastar/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
new file mode 100644
index 00000000..f5c61694
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -0,0 +1,368 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (c) 2016 NXP. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Freescale Semiconductor, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_DPAA2_SEC_PMD_PRIVATE_H_
+#define _RTE_DPAA2_SEC_PMD_PRIVATE_H_
+
+#define MAX_QUEUES 64
+#define MAX_DESC_SIZE 64
+/** private data structure for each DPAA2_SEC device */
+struct dpaa2_sec_dev_private {
+ void *mc_portal; /**< MC Portal for configuring this device */
+	void *hw; /**< Hardware handle for this device. Used by NADK framework */
+	int32_t hw_id; /**< A unique ID of this device instance */
+ int32_t vfio_fd; /**< File descriptor received via VFIO */
+ uint16_t token; /**< Token required by DPxxx objects */
+ unsigned int max_nb_queue_pairs;
+ /**< Max number of queue pairs supported by device */
+ unsigned int max_nb_sessions;
+ /**< Max number of sessions supported by device */
+};
+
+struct dpaa2_sec_qp {
+ struct dpaa2_queue rx_vq;
+ struct dpaa2_queue tx_vq;
+};
+
+enum shr_desc_type {
+ DESC_UPDATE,
+ DESC_FINAL,
+ DESC_INITFINAL,
+};
+
+#define DIR_ENC 1
+#define DIR_DEC 0
+
+/* SEC Flow Context Descriptor */
+struct sec_flow_context {
+ /* word 0 */
+ uint16_t word0_sdid; /* 11-0 SDID */
+ uint16_t word0_res; /* 31-12 reserved */
+
+ /* word 1 */
+ uint8_t word1_sdl; /* 5-0 SDL */
+ /* 7-6 reserved */
+
+ uint8_t word1_bits_15_8; /* 11-8 CRID */
+ /* 14-12 reserved */
+ /* 15 CRJD */
+
+ uint8_t word1_bits23_16; /* 16 EWS */
+ /* 17 DAC */
+ /* 18,19,20 ? */
+ /* 23-21 reserved */
+
+ uint8_t word1_bits31_24; /* 24 RSC */
+ /* 25 RBMT */
+ /* 31-26 reserved */
+
+ /* word 2 RFLC[31-0] */
+ uint32_t word2_rflc_31_0;
+
+ /* word 3 RFLC[63-32] */
+ uint32_t word3_rflc_63_32;
+
+ /* word 4 */
+ uint16_t word4_iicid; /* 15-0 IICID */
+ uint16_t word4_oicid; /* 31-16 OICID */
+
+ /* word 5 */
+ uint32_t word5_ofqid:24; /* 23-0 OFQID */
+ uint32_t word5_31_24:8;
+ /* 24 OSC */
+ /* 25 OBMT */
+ /* 29-26 reserved */
+ /* 31-30 ICR */
+
+ /* word 6 */
+ uint32_t word6_oflc_31_0;
+
+ /* word 7 */
+ uint32_t word7_oflc_63_32;
+
+ /* Word 8-15 storage profiles */
+ uint16_t dl; /**< DataLength(correction) */
+ uint16_t reserved; /**< reserved */
+ uint16_t dhr; /**< DataHeadRoom(correction) */
+ uint16_t mode_bits; /**< mode bits */
+ uint16_t bpv0; /**< buffer pool0 valid */
+ uint16_t bpid0; /**< Bypass Memory Translation */
+ uint16_t bpv1; /**< buffer pool1 valid */
+ uint16_t bpid1; /**< Bypass Memory Translation */
+ uint64_t word_12_15[2]; /**< word 12-15 are reserved */
+};
+
+struct sec_flc_desc {
+ struct sec_flow_context flc;
+ uint32_t desc[MAX_DESC_SIZE];
+};
+
+struct ctxt_priv {
+ struct sec_flc_desc flc_desc[0];
+};
+
+enum dpaa2_sec_op_type {
+	DPAA2_SEC_NONE,  /*!< No cipher operations */
+	DPAA2_SEC_CIPHER, /*!< Cipher operations */
+	DPAA2_SEC_AUTH,  /*!< Authentication operations */
+	DPAA2_SEC_CIPHER_HASH,  /*!< Cipher operation followed by
+				 * authentication
+				 */
+	DPAA2_SEC_HASH_CIPHER,  /*!< Authentication followed by cipher
+				 * operation
+				 */
+ DPAA2_SEC_IPSEC, /*!< IPSEC protocol operations*/
+ DPAA2_SEC_PDCP, /*!< PDCP protocol operations*/
+ DPAA2_SEC_PKC, /*!< Public Key Cryptographic Operations */
+ DPAA2_SEC_MAX
+};
+
+struct dpaa2_sec_cipher_ctxt {
+ struct {
+ uint8_t *data;
+ uint16_t length;
+ } iv; /**< Initialisation vector parameters */
+ uint8_t *init_counter; /*!< Set initial counter for CTR mode */
+};
+
+struct dpaa2_sec_auth_ctxt {
+ uint8_t trunc_len; /*!< Length for output ICV, should
+ * be 0 if no truncation required
+ */
+};
+
+struct dpaa2_sec_aead_ctxt {
+ struct {
+ uint8_t *data;
+ uint16_t length;
+ } iv; /**< Initialisation vector parameters */
+ uint16_t auth_only_len; /*!< Length of data for Auth only */
+ uint8_t auth_cipher_text; /**< Authenticate/cipher ordering */
+ uint8_t trunc_len; /*!< Length for output ICV, should
+ * be 0 if no truncation required
+ */
+};
+
+typedef struct dpaa2_sec_session_entry {
+ void *ctxt;
+ uint8_t ctxt_type;
+ uint8_t dir; /*!< Operation Direction */
+ enum rte_crypto_cipher_algorithm cipher_alg; /*!< Cipher Algorithm*/
+ enum rte_crypto_auth_algorithm auth_alg; /*!< Authentication Algorithm*/
+ struct {
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
+ } cipher_key;
+ struct {
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
+ } auth_key;
+ uint8_t status;
+ union {
+ struct dpaa2_sec_cipher_ctxt cipher_ctxt;
+ struct dpaa2_sec_auth_ctxt auth_ctxt;
+ struct dpaa2_sec_aead_ctxt aead_ctxt;
+ } ext_params;
+} dpaa2_sec_session;
+
+static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
+ { /* MD5 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA1 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 20,
+ .max = 20,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA224 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 28,
+ .max = 28,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA256 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 32,
+ .max = 32,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA384 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 128,
+ .max = 128,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 48,
+ .max = 48,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA512 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 128,
+ .max = 128,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* AES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* 3DES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+ .block_size = 8,
+ .key_size = {
+ .min = 16,
+ .max = 24,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+#endif /* _RTE_DPAA2_SEC_PMD_PRIVATE_H_ */
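Editor's note: an illustrative helper (not part of the patch) that scans the dpaa2_sec_capabilities table above for an authentication algorithm; it assumes this header is included and that the end-of-list entry leaves .op set to RTE_CRYPTO_OP_TYPE_UNDEFINED, as the generic capability lists do.

static int
dpaa2_sec_supports_auth(enum rte_crypto_auth_algorithm algo)
{
	const struct rte_cryptodev_capabilities *cap;

	/* Walk the table until the end-of-capabilities marker. */
	for (cap = dpaa2_sec_capabilities;
	     cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; cap++) {
		if (cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		    cap->sym.auth.algo == algo)
			return 1;
	}
	return 0;
}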
diff --git a/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/compat.h b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/compat.h
new file mode 100644
index 00000000..11fdaa8e
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/compat.h
@@ -0,0 +1,157 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __RTA_COMPAT_H__
+#define __RTA_COMPAT_H__
+
+#include <stdint.h>
+#include <errno.h>
+
+#ifdef __GLIBC__
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <rte_byteorder.h>
+
+#ifndef __BYTE_ORDER__
+#error "Undefined endianness"
+#endif
+
+#else
+#error Environment not supported!
+#endif
+
+#ifndef __always_inline
+#define __always_inline (inline __attribute__((always_inline)))
+#endif
+
+#ifndef __always_unused
+#define __always_unused __attribute__((unused))
+#endif
+
+#ifndef __maybe_unused
+#define __maybe_unused __attribute__((unused))
+#endif
+
+#if defined(__GLIBC__) && !defined(pr_debug)
+#if !defined(SUPPRESS_PRINTS) && defined(RTA_DEBUG)
+#define pr_debug(fmt, ...) \
+ RTE_LOG(DEBUG, PMD, "%s(): " fmt "\n", __func__, ##__VA_ARGS__)
+#else
+#define pr_debug(fmt, ...) do { } while (0)
+#endif
+#endif /* pr_debug */
+
+#if defined(__GLIBC__) && !defined(pr_err)
+#if !defined(SUPPRESS_PRINTS)
+#define pr_err(fmt, ...) \
+ RTE_LOG(ERR, PMD, "%s(): " fmt "\n", __func__, ##__VA_ARGS__)
+#else
+#define pr_err(fmt, ...) do { } while (0)
+#endif
+#endif /* pr_err */
+
+#if defined(__GLIBC__) && !defined(pr_warn)
+#if !defined(SUPPRESS_PRINTS)
+#define pr_warn(fmt, ...) \
+ RTE_LOG(WARNING, PMD, "%s(): " fmt "\n", __func__, ##__VA_ARGS__)
+#else
+#define pr_warn(fmt, ...) do { } while (0)
+#endif
+#endif /* pr_warn */
+
+/**
+ * ARRAY_SIZE - returns the number of elements in an array
+ * @x: array
+ */
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
+#ifndef ALIGN
+#define ALIGN(x, a) (((x) + ((__typeof__(x))(a) - 1)) & \
+ ~((__typeof__(x))(a) - 1))
+#endif
+
+#ifndef BIT
+#define BIT(nr) (1UL << (nr))
+#endif
+
+#ifndef upper_32_bits
+/**
+ * upper_32_bits - return bits 32-63 of a number
+ * @n: the number we're accessing
+ */
+#define upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16))
+#endif
+
+#ifndef lower_32_bits
+/**
+ * lower_32_bits - return bits 0-31 of a number
+ * @n: the number we're accessing
+ */
+#define lower_32_bits(n) ((uint32_t)(n))
+#endif
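Editor's note: a short sketch (not part of the patch) of how the driver uses these helpers to split a 64-bit address across the two 32-bit RFLC words of the SEC flow context (see the word2_rflc_31_0/word3_rflc_63_32 assignments in dpaa2_sec_dpseci.c).

static inline void
set_rflc_words(uint32_t *rflc_lo, uint32_t *rflc_hi, uint64_t addr)
{
	*rflc_lo = lower_32_bits(addr);	/* bits 31..0  */
	*rflc_hi = upper_32_bits(addr);	/* bits 63..32 */
}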
+
+/* Use Linux naming convention */
+#ifdef __GLIBC__
+ #define swab16(x) rte_bswap16(x)
+ #define swab32(x) rte_bswap32(x)
+ #define swab64(x) rte_bswap64(x)
+ /* Define cpu_to_be32 macro if not defined in the build environment */
+ #if !defined(cpu_to_be32)
+ #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ #define cpu_to_be32(x) (x)
+ #else
+ #define cpu_to_be32(x) swab32(x)
+ #endif
+ #endif
+ /* Define cpu_to_le32 macro if not defined in the build environment */
+ #if !defined(cpu_to_le32)
+ #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ #define cpu_to_le32(x) swab32(x)
+ #else
+ #define cpu_to_le32(x) (x)
+ #endif
+ #endif
+#endif
+
+#endif /* __RTA_COMPAT_H__ */
diff --git a/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/desc.h b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/desc.h
new file mode 100644
index 00000000..5185be22
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/desc.h
@@ -0,0 +1,2601 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * SEC descriptor composition header.
+ * Definitions to support SEC descriptor instruction generation
+ */
+
+#ifndef __RTA_DESC_H__
+#define __RTA_DESC_H__
+
+/* hw/compat.h is not delivered in kernel */
+#ifndef __KERNEL__
+#include "hw/compat.h"
+#endif
+
+/* Max size of any SEC descriptor in 32-bit words, inclusive of header */
+#define MAX_CAAM_DESCSIZE 64
+
+#define CAAM_CMD_SZ sizeof(uint32_t)
+#define CAAM_PTR_SZ sizeof(dma_addr_t)
+#define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * MAX_CAAM_DESCSIZE)
+#define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 5 + CAAM_PTR_SZ * 3)
+
+/* Block size of any entity covered/uncovered with a KEK/TKEK */
+#define KEK_BLOCKSIZE 16
+
+/*
+ * Supported descriptor command types as they show up
+ * inside a descriptor command word.
+ */
+#define CMD_SHIFT 27
+#define CMD_MASK (0x1f << CMD_SHIFT)
+
+#define CMD_KEY (0x00 << CMD_SHIFT)
+#define CMD_SEQ_KEY (0x01 << CMD_SHIFT)
+#define CMD_LOAD (0x02 << CMD_SHIFT)
+#define CMD_SEQ_LOAD (0x03 << CMD_SHIFT)
+#define CMD_FIFO_LOAD (0x04 << CMD_SHIFT)
+#define CMD_SEQ_FIFO_LOAD (0x05 << CMD_SHIFT)
+#define CMD_MOVEDW (0x06 << CMD_SHIFT)
+#define CMD_MOVEB (0x07 << CMD_SHIFT)
+#define CMD_STORE (0x0a << CMD_SHIFT)
+#define CMD_SEQ_STORE (0x0b << CMD_SHIFT)
+#define CMD_FIFO_STORE (0x0c << CMD_SHIFT)
+#define CMD_SEQ_FIFO_STORE (0x0d << CMD_SHIFT)
+#define CMD_MOVE_LEN (0x0e << CMD_SHIFT)
+#define CMD_MOVE (0x0f << CMD_SHIFT)
+#define CMD_OPERATION ((uint32_t)(0x10 << CMD_SHIFT))
+#define CMD_SIGNATURE ((uint32_t)(0x12 << CMD_SHIFT))
+#define CMD_JUMP ((uint32_t)(0x14 << CMD_SHIFT))
+#define CMD_MATH ((uint32_t)(0x15 << CMD_SHIFT))
+#define CMD_DESC_HDR ((uint32_t)(0x16 << CMD_SHIFT))
+#define CMD_SHARED_DESC_HDR ((uint32_t)(0x17 << CMD_SHIFT))
+#define CMD_MATHI ((uint32_t)(0x1d << CMD_SHIFT))
+#define CMD_SEQ_IN_PTR ((uint32_t)(0x1e << CMD_SHIFT))
+#define CMD_SEQ_OUT_PTR ((uint32_t)(0x1f << CMD_SHIFT))
+
+/* General-purpose class selector for all commands */
+#define CLASS_SHIFT 25
+#define CLASS_MASK (0x03 << CLASS_SHIFT)
+
+#define CLASS_NONE (0x00 << CLASS_SHIFT)
+#define CLASS_1 (0x01 << CLASS_SHIFT)
+#define CLASS_2 (0x02 << CLASS_SHIFT)
+#define CLASS_BOTH (0x03 << CLASS_SHIFT)
+
+/* ICV Check bits for Algo Operation command */
+#define ICV_CHECK_DISABLE 0
+#define ICV_CHECK_ENABLE 1
+
+/* Encap Mode check bits for Algo Operation command */
+#define DIR_ENC 1
+#define DIR_DEC 0
+
+/*
+ * Descriptor header command constructs
+ * Covers shared, job, and trusted descriptor headers
+ */
+
+/*
+ * Extended Job Descriptor Header
+ */
+#define HDR_EXT BIT(24)
+
+/*
+ * Read input frame as soon as possible (SHR HDR)
+ */
+#define HDR_RIF BIT(25)
+
+/*
+ * Require SEQ LIODN to be the Same (JOB HDR)
+ */
+#define HDR_RSLS BIT(25)
+
+/*
+ * Do Not Run - marks a descriptor not executable if there was
+ * a preceding error somewhere
+ */
+#define HDR_DNR BIT(24)
+
+/*
+ * ONE - should always be set. Combination of ONE (always
+ * set) and ZRO (always clear) forms an endianness sanity check
+ */
+#define HDR_ONE BIT(23)
+#define HDR_ZRO BIT(15)
+
+/* Start Index or SharedDesc Length */
+#define HDR_START_IDX_SHIFT 16
+#define HDR_START_IDX_MASK (0x3f << HDR_START_IDX_SHIFT)
+
+/* If shared descriptor header, 6-bit length */
+#define HDR_DESCLEN_SHR_MASK 0x3f
+
+/* If non-shared header, 7-bit length */
+#define HDR_DESCLEN_MASK 0x7f
+
+/* This is a TrustedDesc (if not SharedDesc) */
+#define HDR_TRUSTED BIT(14)
+
+/* Make into TrustedDesc (if not SharedDesc) */
+#define HDR_MAKE_TRUSTED BIT(13)
+
+/* Clear Input FiFO (if SharedDesc) */
+#define HDR_CLEAR_IFIFO BIT(13)
+
+/* Save context if self-shared (if SharedDesc) */
+#define HDR_SAVECTX BIT(12)
+
+/* Next item points to SharedDesc */
+#define HDR_SHARED BIT(12)
+
+/*
+ * Reverse Execution Order - execute JobDesc first, then
+ * execute SharedDesc (normally SharedDesc goes first).
+ */
+#define HDR_REVERSE BIT(11)
+
+/* Propagate DNR property to SharedDesc */
+#define HDR_PROP_DNR BIT(11)
+
+/* DECO Select Valid */
+#define HDR_EXT_DSEL_VALID BIT(7)
+
+/* Fake trusted descriptor */
+#define HDR_EXT_FTD BIT(8)
+
+/* JobDesc/SharedDesc share property */
+#define HDR_SD_SHARE_SHIFT 8
+#define HDR_SD_SHARE_MASK (0x03 << HDR_SD_SHARE_SHIFT)
+#define HDR_JD_SHARE_SHIFT 8
+#define HDR_JD_SHARE_MASK (0x07 << HDR_JD_SHARE_SHIFT)
+
+#define HDR_SHARE_NEVER (0x00 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_WAIT (0x01 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_SERIAL (0x02 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_ALWAYS (0x03 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_DEFER (0x04 << HDR_SD_SHARE_SHIFT)
+
+/* JobDesc/SharedDesc descriptor length */
+#define HDR_JD_LENGTH_MASK 0x7f
+#define HDR_SD_LENGTH_MASK 0x3f
+
+/*
+ * KEY/SEQ_KEY Command Constructs
+ */
+
+/* Key Destination Class: 01 = Class 1, 02 = Class 2 */
+#define KEY_DEST_CLASS_SHIFT 25
+#define KEY_DEST_CLASS_MASK (0x03 << KEY_DEST_CLASS_SHIFT)
+#define KEY_DEST_CLASS1 (1 << KEY_DEST_CLASS_SHIFT)
+#define KEY_DEST_CLASS2 (2 << KEY_DEST_CLASS_SHIFT)
+
+/* Scatter-Gather Table/Variable Length Field */
+#define KEY_SGF BIT(24)
+#define KEY_VLF BIT(24)
+
+/* Immediate - Key follows command in the descriptor */
+#define KEY_IMM BIT(23)
+
+/*
+ * Already in Input Data FIFO - the Input Data Sequence is not read, since it is
+ * already in the Input Data FIFO.
+ */
+#define KEY_AIDF BIT(23)
+
+/*
+ * Encrypted - Key is encrypted either with the KEK, or
+ * with the TDKEK if this descriptor is trusted
+ */
+#define KEY_ENC BIT(22)
+
+/*
+ * No Write Back - Do not allow key to be FIFO STOREd
+ */
+#define KEY_NWB BIT(21)
+
+/*
+ * Enhanced Encryption of Key
+ */
+#define KEY_EKT BIT(20)
+
+/*
+ * Encrypted with Trusted Key
+ */
+#define KEY_TK BIT(15)
+
+/*
+ * Plaintext Store
+ */
+#define KEY_PTS BIT(14)
+
+/*
+ * KDEST - Key Destination: 0 - class key register,
+ * 1 - PKHA 'e', 2 - AFHA Sbox, 3 - MDHA split key
+ */
+#define KEY_DEST_SHIFT 16
+#define KEY_DEST_MASK (0x03 << KEY_DEST_SHIFT)
+
+#define KEY_DEST_CLASS_REG (0x00 << KEY_DEST_SHIFT)
+#define KEY_DEST_PKHA_E (0x01 << KEY_DEST_SHIFT)
+#define KEY_DEST_AFHA_SBOX (0x02 << KEY_DEST_SHIFT)
+#define KEY_DEST_MDHA_SPLIT (0x03 << KEY_DEST_SHIFT)
+
+/* Length in bytes */
+#define KEY_LENGTH_MASK 0x000003ff
+
+/*
+ * LOAD/SEQ_LOAD/STORE/SEQ_STORE Command Constructs
+ */
+
+/*
+ * Load/Store Destination: 0 = class independent CCB,
+ * 1 = class 1 CCB, 2 = class 2 CCB, 3 = DECO
+ */
+#define LDST_CLASS_SHIFT 25
+#define LDST_CLASS_MASK (0x03 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_IND_CCB (0x00 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_1_CCB (0x01 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_2_CCB (0x02 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_DECO (0x03 << LDST_CLASS_SHIFT)
+
+/* Scatter-Gather Table/Variable Length Field */
+#define LDST_SGF BIT(24)
+#define LDST_VLF BIT(24)
+
+/* Immediate - Key follows this command in descriptor */
+#define LDST_IMM_MASK 1
+#define LDST_IMM_SHIFT 23
+#define LDST_IMM BIT(23)
+
+/* SRC/DST - Destination for LOAD, Source for STORE */
+#define LDST_SRCDST_SHIFT 16
+#define LDST_SRCDST_MASK (0x7f << LDST_SRCDST_SHIFT)
+
+#define LDST_SRCDST_BYTE_CONTEXT (0x20 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_BYTE_KEY (0x40 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_BYTE_INFIFO (0x7c << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_BYTE_OUTFIFO (0x7e << LDST_SRCDST_SHIFT)
+
+#define LDST_SRCDST_WORD_MODE_REG (0x00 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_JQCTRL (0x00 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_KEYSZ_REG (0x01 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_JQDAR (0x01 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DATASZ_REG (0x02 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_STAT (0x02 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_ICVSZ_REG (0x03 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_BYTE_DCHKSM (0x03 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PID (0x04 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CHACTRL (0x06 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECOCTRL (0x06 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_IRQCTRL (0x07 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_PCLOVRD (0x07 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CLRW (0x08 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH0 (0x08 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_STAT (0x09 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH1 (0x09 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH2 (0x0a << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_AAD_SZ (0x0b << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH3 (0x0b << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CLASS1_IV_SZ (0x0c << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_ALTDS_CLASS1 (0x0f << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_A_SZ (0x10 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_GTR (0x10 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_B_SZ (0x11 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_N_SZ (0x12 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_E_SZ (0x13 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CLASS_CTX (0x20 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_STR (0x20 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF (0x40 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF_JOB (0x41 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF_SHARED (0x42 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF_JOB_WE (0x45 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF_SHARED_WE (0x46 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_INFO_FIFO_SZL (0x70 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_INFO_FIFO_SZM (0x71 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_INFO_FIFO_L (0x72 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_INFO_FIFO_M (0x73 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_SZL (0x74 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_SZM (0x75 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_IFNSR (0x76 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_OFNSR (0x77 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_BYTE_ALTSOURCE (0x78 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_INFO_FIFO (0x7a << LDST_SRCDST_SHIFT)
+
+/* Offset in source/destination */
+#define LDST_OFFSET_SHIFT 8
+#define LDST_OFFSET_MASK (0xff << LDST_OFFSET_SHIFT)
+
+/* LDOFF definitions used when DST = LDST_SRCDST_WORD_DECOCTRL */
+/* These could also be shifted by LDST_OFFSET_SHIFT - this reads better */
+#define LDOFF_CHG_SHARE_SHIFT 0
+#define LDOFF_CHG_SHARE_MASK (0x3 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_NEVER (0x1 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_OK_PROP (0x2 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_OK_NO_PROP (0x3 << LDOFF_CHG_SHARE_SHIFT)
+
+#define LDOFF_ENABLE_AUTO_NFIFO BIT(2)
+#define LDOFF_DISABLE_AUTO_NFIFO BIT(3)
+
+#define LDOFF_CHG_NONSEQLIODN_SHIFT 4
+#define LDOFF_CHG_NONSEQLIODN_MASK (0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+#define LDOFF_CHG_NONSEQLIODN_SEQ (0x1 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+#define LDOFF_CHG_NONSEQLIODN_NON_SEQ (0x2 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+#define LDOFF_CHG_NONSEQLIODN_TRUSTED (0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+
+#define LDOFF_CHG_SEQLIODN_SHIFT 6
+#define LDOFF_CHG_SEQLIODN_MASK (0x3 << LDOFF_CHG_SEQLIODN_SHIFT)
+#define LDOFF_CHG_SEQLIODN_SEQ (0x1 << LDOFF_CHG_SEQLIODN_SHIFT)
+#define LDOFF_CHG_SEQLIODN_NON_SEQ (0x2 << LDOFF_CHG_SEQLIODN_SHIFT)
+#define LDOFF_CHG_SEQLIODN_TRUSTED (0x3 << LDOFF_CHG_SEQLIODN_SHIFT)
+
+/* Data length in bytes */
+#define LDST_LEN_SHIFT 0
+#define LDST_LEN_MASK (0xff << LDST_LEN_SHIFT)
+
+/* Special Length definitions when dst=deco-ctrl */
+#define LDLEN_ENABLE_OSL_COUNT BIT(7)
+#define LDLEN_RST_CHA_OFIFO_PTR BIT(6)
+#define LDLEN_RST_OFIFO BIT(5)
+#define LDLEN_SET_OFIFO_OFF_VALID BIT(4)
+#define LDLEN_SET_OFIFO_OFF_RSVD BIT(3)
+#define LDLEN_SET_OFIFO_OFFSET_SHIFT 0
+#define LDLEN_SET_OFIFO_OFFSET_MASK (3 << LDLEN_SET_OFIFO_OFFSET_SHIFT)
+
+/* CCB Clear Written Register bits */
+#define CLRW_CLR_C1MODE BIT(0)
+#define CLRW_CLR_C1DATAS BIT(2)
+#define CLRW_CLR_C1ICV BIT(3)
+#define CLRW_CLR_C1CTX BIT(5)
+#define CLRW_CLR_C1KEY BIT(6)
+#define CLRW_CLR_PK_A BIT(12)
+#define CLRW_CLR_PK_B BIT(13)
+#define CLRW_CLR_PK_N BIT(14)
+#define CLRW_CLR_PK_E BIT(15)
+#define CLRW_CLR_C2MODE BIT(16)
+#define CLRW_CLR_C2KEYS BIT(17)
+#define CLRW_CLR_C2DATAS BIT(18)
+#define CLRW_CLR_C2CTX BIT(21)
+#define CLRW_CLR_C2KEY BIT(22)
+#define CLRW_RESET_CLS2_DONE BIT(26) /* era 4 */
+#define CLRW_RESET_CLS1_DONE BIT(27) /* era 4 */
+#define CLRW_RESET_CLS2_CHA BIT(28) /* era 4 */
+#define CLRW_RESET_CLS1_CHA BIT(29) /* era 4 */
+#define CLRW_RESET_OFIFO BIT(30) /* era 3 */
+#define CLRW_RESET_IFIFO_DFIFO BIT(31) /* era 3 */
+
+/* CHA Control Register bits */
+#define CCTRL_RESET_CHA_ALL BIT(0)
+#define CCTRL_RESET_CHA_AESA BIT(1)
+#define CCTRL_RESET_CHA_DESA BIT(2)
+#define CCTRL_RESET_CHA_AFHA BIT(3)
+#define CCTRL_RESET_CHA_KFHA BIT(4)
+#define CCTRL_RESET_CHA_SF8A BIT(5)
+#define CCTRL_RESET_CHA_PKHA BIT(6)
+#define CCTRL_RESET_CHA_MDHA BIT(7)
+#define CCTRL_RESET_CHA_CRCA BIT(8)
+#define CCTRL_RESET_CHA_RNG BIT(9)
+#define CCTRL_RESET_CHA_SF9A BIT(10)
+#define CCTRL_RESET_CHA_ZUCE BIT(11)
+#define CCTRL_RESET_CHA_ZUCA BIT(12)
+#define CCTRL_UNLOAD_PK_A0 BIT(16)
+#define CCTRL_UNLOAD_PK_A1 BIT(17)
+#define CCTRL_UNLOAD_PK_A2 BIT(18)
+#define CCTRL_UNLOAD_PK_A3 BIT(19)
+#define CCTRL_UNLOAD_PK_B0 BIT(20)
+#define CCTRL_UNLOAD_PK_B1 BIT(21)
+#define CCTRL_UNLOAD_PK_B2 BIT(22)
+#define CCTRL_UNLOAD_PK_B3 BIT(23)
+#define CCTRL_UNLOAD_PK_N BIT(24)
+#define CCTRL_UNLOAD_PK_A BIT(26)
+#define CCTRL_UNLOAD_PK_B BIT(27)
+#define CCTRL_UNLOAD_SBOX BIT(28)
+
+/* IRQ Control Register (CxCIRQ) bits */
+#define CIRQ_ADI BIT(1)
+#define CIRQ_DDI BIT(2)
+#define CIRQ_RCDI BIT(3)
+#define CIRQ_KDI BIT(4)
+#define CIRQ_S8DI BIT(5)
+#define CIRQ_PDI BIT(6)
+#define CIRQ_MDI BIT(7)
+#define CIRQ_CDI BIT(8)
+#define CIRQ_RNDI BIT(9)
+#define CIRQ_S9DI BIT(10)
+#define CIRQ_ZEDI BIT(11) /* valid for Era 5 or higher */
+#define CIRQ_ZADI BIT(12) /* valid for Era 5 or higher */
+#define CIRQ_AEI BIT(17)
+#define CIRQ_DEI BIT(18)
+#define CIRQ_RCEI BIT(19)
+#define CIRQ_KEI BIT(20)
+#define CIRQ_S8EI BIT(21)
+#define CIRQ_PEI BIT(22)
+#define CIRQ_MEI BIT(23)
+#define CIRQ_CEI BIT(24)
+#define CIRQ_RNEI BIT(25)
+#define CIRQ_S9EI BIT(26)
+#define CIRQ_ZEEI BIT(27) /* valid for Era 5 or higher */
+#define CIRQ_ZAEI BIT(28) /* valid for Era 5 or higher */
+
+/*
+ * FIFO_LOAD/FIFO_STORE/SEQ_FIFO_LOAD/SEQ_FIFO_STORE
+ * Command Constructs
+ */
+
+/*
+ * Load Destination: 0 = skip (SEQ_FIFO_LOAD only),
+ * 1 = Load for Class1, 2 = Load for Class2, 3 = Load both
+ * Store Source: 0 = normal, 1 = Class1key, 2 = Class2key, 3 = Both
+ */
+#define FIFOLD_CLASS_SHIFT 25
+#define FIFOLD_CLASS_MASK (0x03 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_SKIP (0x00 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_CLASS1 (0x01 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_CLASS2 (0x02 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_BOTH (0x03 << FIFOLD_CLASS_SHIFT)
+
+#define FIFOST_CLASS_SHIFT 25
+#define FIFOST_CLASS_MASK (0x03 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_NORMAL (0x00 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_CLASS1KEY (0x01 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_CLASS2KEY (0x02 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_BOTH (0x03 << FIFOST_CLASS_SHIFT)
+
+/*
+ * Scatter-Gather Table/Variable Length Field
+ * If set for FIFO_LOAD, the pointer refers to a scatter-gather table.
+ * Within SEQ_FIFO_LOAD, it selects the variable-length input sequence.
+ */
+#define FIFOLDST_SGF_SHIFT 24
+#define FIFOLDST_SGF_MASK (1 << FIFOLDST_SGF_SHIFT)
+#define FIFOLDST_VLF_MASK (1 << FIFOLDST_SGF_SHIFT)
+#define FIFOLDST_SGF BIT(24)
+#define FIFOLDST_VLF BIT(24)
+
+/*
+ * Immediate - Data follows command in descriptor
+ * AIDF - Already in Input Data FIFO
+ */
+#define FIFOLD_IMM_SHIFT 23
+#define FIFOLD_IMM_MASK (1 << FIFOLD_IMM_SHIFT)
+#define FIFOLD_AIDF_MASK (1 << FIFOLD_IMM_SHIFT)
+#define FIFOLD_IMM BIT(23)
+#define FIFOLD_AIDF BIT(23)
+
+#define FIFOST_IMM_SHIFT 23
+#define FIFOST_IMM_MASK (1 << FIFOST_IMM_SHIFT)
+#define FIFOST_IMM BIT(23)
+
+/* Continue - Not the last FIFO store to come */
+#define FIFOST_CONT_SHIFT 23
+#define FIFOST_CONT_MASK (1 << FIFOST_CONT_SHIFT)
+#define FIFOST_CONT BIT(23)
+
+/*
+ * Extended Length - use 32-bit extended length that
+ * follows the pointer field. Illegal with IMM set
+ */
+#define FIFOLDST_EXT_SHIFT 22
+#define FIFOLDST_EXT_MASK (1 << FIFOLDST_EXT_SHIFT)
+#define FIFOLDST_EXT BIT(22)
+
+/* Input data type. */
+#define FIFOLD_TYPE_SHIFT 16
+#define FIFOLD_CONT_TYPE_SHIFT 19 /* shift past last-flush bits */
+#define FIFOLD_TYPE_MASK (0x3f << FIFOLD_TYPE_SHIFT)
+
+/* PK types */
+#define FIFOLD_TYPE_PK (0x00 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_MASK (0x30 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_TYPEMASK (0x0f << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A0 (0x00 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A1 (0x01 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A2 (0x02 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A3 (0x03 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B0 (0x04 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B1 (0x05 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B2 (0x06 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B3 (0x07 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_N (0x08 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A (0x0c << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B (0x0d << FIFOLD_TYPE_SHIFT)
+
+/* Other types. Need to OR in last/flush bits as desired */
+#define FIFOLD_TYPE_MSG_MASK (0x38 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_MSG (0x10 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_MSG1OUT2 (0x18 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_IV (0x20 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_BITDATA (0x28 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_AAD (0x30 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_ICV (0x38 << FIFOLD_TYPE_SHIFT)
+
+/* Last/Flush bits for use with "other" types above */
+#define FIFOLD_TYPE_ACT_MASK (0x07 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_NOACTION (0x00 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_FLUSH1 (0x01 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST1 (0x02 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST2FLUSH (0x03 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST2 (0x04 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST2FLUSH1 (0x05 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LASTBOTH (0x06 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LASTBOTHFL (0x07 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_NOINFOFIFO (0x0f << FIFOLD_TYPE_SHIFT)
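+
+/*
+ * Illustrative sketch (not from the original header): a class-1 FIFO
+ * LOAD type field for message data that is also the last class-1 data
+ * ORs one class, one input type and one action selector together:
+ *
+ *	uint32_t fifold = FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
+ *			  FIFOLD_TYPE_LAST1;
+ *
+ * The FIFO LOAD opcode and length field are added separately when the
+ * full command word is built.
+ */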
+
+#define FIFOLDST_LEN_MASK 0xffff
+#define FIFOLDST_EXT_LEN_MASK 0xffffffff
+
+/* Output data types */
+#define FIFOST_TYPE_SHIFT 16
+#define FIFOST_TYPE_MASK (0x3f << FIFOST_TYPE_SHIFT)
+
+#define FIFOST_TYPE_PKHA_A0 (0x00 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A1 (0x01 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A2 (0x02 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A3 (0x03 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B0 (0x04 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B1 (0x05 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B2 (0x06 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B3 (0x07 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_N (0x08 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A (0x0c << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B (0x0d << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_AF_SBOX_JKEK (0x20 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_E_JKEK (0x22 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_E_TKEK (0x23 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_KEY_KEK (0x24 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_KEY_TKEK (0x25 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SPLIT_KEK (0x26 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SPLIT_TKEK (0x27 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_OUTFIFO_KEK (0x28 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_OUTFIFO_TKEK (0x29 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_MESSAGE_DATA (0x30 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_MESSAGE_DATA2 (0x31 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_RNGSTORE (0x34 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_RNGFIFO (0x35 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_METADATA (0x3e << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SKIP (0x3f << FIFOST_TYPE_SHIFT)
+
+/*
+ * OPERATION Command Constructs
+ */
+
+/* Operation type selectors - OP TYPE */
+#define OP_TYPE_SHIFT 24
+#define OP_TYPE_MASK (0x07 << OP_TYPE_SHIFT)
+
+#define OP_TYPE_UNI_PROTOCOL (0x00 << OP_TYPE_SHIFT)
+#define OP_TYPE_PK (0x01 << OP_TYPE_SHIFT)
+#define OP_TYPE_CLASS1_ALG (0x02 << OP_TYPE_SHIFT)
+#define OP_TYPE_CLASS2_ALG (0x04 << OP_TYPE_SHIFT)
+#define OP_TYPE_DECAP_PROTOCOL (0x06 << OP_TYPE_SHIFT)
+#define OP_TYPE_ENCAP_PROTOCOL (0x07 << OP_TYPE_SHIFT)
+
+/* ProtocolID selectors - PROTID */
+#define OP_PCLID_SHIFT 16
+#define OP_PCLID_MASK (0xff << OP_PCLID_SHIFT)
+
+/* Assuming OP_TYPE = OP_TYPE_UNI_PROTOCOL */
+#define OP_PCLID_IKEV1_PRF (0x01 << OP_PCLID_SHIFT)
+#define OP_PCLID_IKEV2_PRF (0x02 << OP_PCLID_SHIFT)
+#define OP_PCLID_SSL30_PRF (0x08 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS10_PRF (0x09 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS11_PRF (0x0a << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS12_PRF (0x0b << OP_PCLID_SHIFT)
+#define OP_PCLID_DTLS10_PRF (0x0c << OP_PCLID_SHIFT)
+#define OP_PCLID_PUBLICKEYPAIR (0x14 << OP_PCLID_SHIFT)
+#define OP_PCLID_DSASIGN (0x15 << OP_PCLID_SHIFT)
+#define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT)
+#define OP_PCLID_DIFFIEHELLMAN (0x17 << OP_PCLID_SHIFT)
+#define OP_PCLID_RSAENCRYPT (0x18 << OP_PCLID_SHIFT)
+#define OP_PCLID_RSADECRYPT (0x19 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_MD5 (0x20 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA1 (0x21 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA224 (0x22 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA256 (0x23 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA384 (0x24 << OP_PCLID_SHIFT)
+#define OP_PCLID_DKP_SHA512 (0x25 << OP_PCLID_SHIFT)
+
+/* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */
+#define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT)
+#define OP_PCLID_SRTP (0x02 << OP_PCLID_SHIFT)
+#define OP_PCLID_MACSEC (0x03 << OP_PCLID_SHIFT)
+#define OP_PCLID_WIFI (0x04 << OP_PCLID_SHIFT)
+#define OP_PCLID_WIMAX (0x05 << OP_PCLID_SHIFT)
+#define OP_PCLID_SSL30 (0x08 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS10 (0x09 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS11 (0x0a << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS12 (0x0b << OP_PCLID_SHIFT)
+#define OP_PCLID_DTLS10 (0x0c << OP_PCLID_SHIFT)
+#define OP_PCLID_BLOB (0x0d << OP_PCLID_SHIFT)
+#define OP_PCLID_IPSEC_NEW (0x11 << OP_PCLID_SHIFT)
+#define OP_PCLID_3G_DCRC (0x31 << OP_PCLID_SHIFT)
+#define OP_PCLID_3G_RLC_PDU (0x32 << OP_PCLID_SHIFT)
+#define OP_PCLID_3G_RLC_SDU (0x33 << OP_PCLID_SHIFT)
+#define OP_PCLID_LTE_PDCP_USER (0x42 << OP_PCLID_SHIFT)
+#define OP_PCLID_LTE_PDCP_CTRL (0x43 << OP_PCLID_SHIFT)
+#define OP_PCLID_LTE_PDCP_CTRL_MIXED (0x44 << OP_PCLID_SHIFT)
+
+/*
+ * ProtocolInfo selectors
+ */
+#define OP_PCLINFO_MASK 0xffff
+
+/* for OP_PCLID_IPSEC */
+#define OP_PCL_IPSEC_CIPHER_MASK 0xff00
+#define OP_PCL_IPSEC_AUTH_MASK 0x00ff
+
+#define OP_PCL_IPSEC_DES_IV64 0x0100
+#define OP_PCL_IPSEC_DES 0x0200
+#define OP_PCL_IPSEC_3DES 0x0300
+#define OP_PCL_IPSEC_NULL 0x0B00
+#define OP_PCL_IPSEC_AES_CBC 0x0c00
+#define OP_PCL_IPSEC_AES_CTR 0x0d00
+#define OP_PCL_IPSEC_AES_XTS 0x1600
+#define OP_PCL_IPSEC_AES_CCM8 0x0e00
+#define OP_PCL_IPSEC_AES_CCM12 0x0f00
+#define OP_PCL_IPSEC_AES_CCM16 0x1000
+#define OP_PCL_IPSEC_AES_GCM8 0x1200
+#define OP_PCL_IPSEC_AES_GCM12 0x1300
+#define OP_PCL_IPSEC_AES_GCM16 0x1400
+#define OP_PCL_IPSEC_AES_NULL_WITH_GMAC 0x1500
+
+#define OP_PCL_IPSEC_HMAC_NULL 0x0000
+#define OP_PCL_IPSEC_HMAC_MD5_96 0x0001
+#define OP_PCL_IPSEC_HMAC_SHA1_96 0x0002
+#define OP_PCL_IPSEC_AES_XCBC_MAC_96 0x0005
+#define OP_PCL_IPSEC_HMAC_MD5_128 0x0006
+#define OP_PCL_IPSEC_HMAC_SHA1_160 0x0007
+#define OP_PCL_IPSEC_AES_CMAC_96 0x0008
+#define OP_PCL_IPSEC_HMAC_SHA2_256_128 0x000c
+#define OP_PCL_IPSEC_HMAC_SHA2_384_192 0x000d
+#define OP_PCL_IPSEC_HMAC_SHA2_512_256 0x000e
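+
+/*
+ * Illustrative sketch (not from the original header): the protinfo
+ * halves above combine with an operation type and protocol ID into a
+ * protocol OPERATION word, e.g. ESP encapsulation with AES-CBC and
+ * HMAC-SHA1-96:
+ *
+ *	uint32_t op = OP_TYPE_ENCAP_PROTOCOL | OP_PCLID_IPSEC |
+ *		      OP_PCL_IPSEC_AES_CBC | OP_PCL_IPSEC_HMAC_SHA1_96;
+ *
+ * The OPERATION command opcode itself is OR'd in by the protocol
+ * command constructor.
+ */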
+
+/* For SRTP - OP_PCLID_SRTP */
+#define OP_PCL_SRTP_CIPHER_MASK 0xff00
+#define OP_PCL_SRTP_AUTH_MASK 0x00ff
+
+#define OP_PCL_SRTP_AES_CTR 0x0d00
+
+#define OP_PCL_SRTP_HMAC_SHA1_160 0x0007
+
+/* For SSL 3.0 - OP_PCLID_SSL30 */
+#define OP_PCL_SSL30_AES_128_CBC_SHA 0x002f
+#define OP_PCL_SSL30_AES_128_CBC_SHA_2 0x0030
+#define OP_PCL_SSL30_AES_128_CBC_SHA_3 0x0031
+#define OP_PCL_SSL30_AES_128_CBC_SHA_4 0x0032
+#define OP_PCL_SSL30_AES_128_CBC_SHA_5 0x0033
+#define OP_PCL_SSL30_AES_128_CBC_SHA_6 0x0034
+#define OP_PCL_SSL30_AES_128_CBC_SHA_7 0x008c
+#define OP_PCL_SSL30_AES_128_CBC_SHA_8 0x0090
+#define OP_PCL_SSL30_AES_128_CBC_SHA_9 0x0094
+#define OP_PCL_SSL30_AES_128_CBC_SHA_10 0xc004
+#define OP_PCL_SSL30_AES_128_CBC_SHA_11 0xc009
+#define OP_PCL_SSL30_AES_128_CBC_SHA_12 0xc00e
+#define OP_PCL_SSL30_AES_128_CBC_SHA_13 0xc013
+#define OP_PCL_SSL30_AES_128_CBC_SHA_14 0xc018
+#define OP_PCL_SSL30_AES_128_CBC_SHA_15 0xc01d
+#define OP_PCL_SSL30_AES_128_CBC_SHA_16 0xc01e
+#define OP_PCL_SSL30_AES_128_CBC_SHA_17 0xc01f
+
+#define OP_PCL_SSL30_AES_256_CBC_SHA 0x0035
+#define OP_PCL_SSL30_AES_256_CBC_SHA_2 0x0036
+#define OP_PCL_SSL30_AES_256_CBC_SHA_3 0x0037
+#define OP_PCL_SSL30_AES_256_CBC_SHA_4 0x0038
+#define OP_PCL_SSL30_AES_256_CBC_SHA_5 0x0039
+#define OP_PCL_SSL30_AES_256_CBC_SHA_6 0x003a
+#define OP_PCL_SSL30_AES_256_CBC_SHA_7 0x008d
+#define OP_PCL_SSL30_AES_256_CBC_SHA_8 0x0091
+#define OP_PCL_SSL30_AES_256_CBC_SHA_9 0x0095
+#define OP_PCL_SSL30_AES_256_CBC_SHA_10 0xc005
+#define OP_PCL_SSL30_AES_256_CBC_SHA_11 0xc00a
+#define OP_PCL_SSL30_AES_256_CBC_SHA_12 0xc00f
+#define OP_PCL_SSL30_AES_256_CBC_SHA_13 0xc014
+#define OP_PCL_SSL30_AES_256_CBC_SHA_14 0xc019
+#define OP_PCL_SSL30_AES_256_CBC_SHA_15 0xc020
+#define OP_PCL_SSL30_AES_256_CBC_SHA_16 0xc021
+#define OP_PCL_SSL30_AES_256_CBC_SHA_17 0xc022
+
+#define OP_PCL_SSL30_AES_128_GCM_SHA256_1 0x009C
+#define OP_PCL_SSL30_AES_256_GCM_SHA384_1 0x009D
+#define OP_PCL_SSL30_AES_128_GCM_SHA256_2 0x009E
+#define OP_PCL_SSL30_AES_256_GCM_SHA384_2 0x009F
+#define OP_PCL_SSL30_AES_128_GCM_SHA256_3 0x00A0
+#define OP_PCL_SSL30_AES_256_GCM_SHA384_3 0x00A1
+#define OP_PCL_SSL30_AES_128_GCM_SHA256_4 0x00A2
+#define OP_PCL_SSL30_AES_256_GCM_SHA384_4 0x00A3
+#define OP_PCL_SSL30_AES_128_GCM_SHA256_5 0x00A4
+#define OP_PCL_SSL30_AES_256_GCM_SHA384_5 0x00A5
+#define OP_PCL_SSL30_AES_128_GCM_SHA256_6 0x00A6
+
+#define OP_PCL_TLS_DH_ANON_AES_256_GCM_SHA384 0x00A7
+#define OP_PCL_TLS_PSK_AES_128_GCM_SHA256 0x00A8
+#define OP_PCL_TLS_PSK_AES_256_GCM_SHA384 0x00A9
+#define OP_PCL_TLS_DHE_PSK_AES_128_GCM_SHA256 0x00AA
+#define OP_PCL_TLS_DHE_PSK_AES_256_GCM_SHA384 0x00AB
+#define OP_PCL_TLS_RSA_PSK_AES_128_GCM_SHA256 0x00AC
+#define OP_PCL_TLS_RSA_PSK_AES_256_GCM_SHA384 0x00AD
+#define OP_PCL_TLS_PSK_AES_128_CBC_SHA256 0x00AE
+#define OP_PCL_TLS_PSK_AES_256_CBC_SHA384 0x00AF
+#define OP_PCL_TLS_DHE_PSK_AES_128_CBC_SHA256 0x00B2
+#define OP_PCL_TLS_DHE_PSK_AES_256_CBC_SHA384 0x00B3
+#define OP_PCL_TLS_RSA_PSK_AES_128_CBC_SHA256 0x00B6
+#define OP_PCL_TLS_RSA_PSK_AES_256_CBC_SHA384 0x00B7
+
+#define OP_PCL_SSL30_3DES_EDE_CBC_MD5 0x0023
+
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA 0x001f
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_2 0x008b
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_3 0x008f
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_4 0x0093
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_5 0x000a
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_6 0x000d
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_7 0x0010
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_8 0x0013
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_9 0x0016
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_10 0x001b
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_11 0xc003
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_12 0xc008
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_13 0xc00d
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_14 0xc012
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_15 0xc017
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_16 0xc01a
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_17 0xc01b
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_18 0xc01c
+
+#define OP_PCL_SSL30_DES40_CBC_MD5 0x0029
+
+#define OP_PCL_SSL30_DES_CBC_MD5 0x0022
+
+#define OP_PCL_SSL30_DES40_CBC_SHA 0x0008
+#define OP_PCL_SSL30_DES40_CBC_SHA_2 0x000b
+#define OP_PCL_SSL30_DES40_CBC_SHA_3 0x000e
+#define OP_PCL_SSL30_DES40_CBC_SHA_4 0x0011
+#define OP_PCL_SSL30_DES40_CBC_SHA_5 0x0014
+#define OP_PCL_SSL30_DES40_CBC_SHA_6 0x0019
+#define OP_PCL_SSL30_DES40_CBC_SHA_7 0x0026
+
+#define OP_PCL_SSL30_DES_CBC_SHA 0x001e
+#define OP_PCL_SSL30_DES_CBC_SHA_2 0x0009
+#define OP_PCL_SSL30_DES_CBC_SHA_3 0x000c
+#define OP_PCL_SSL30_DES_CBC_SHA_4 0x000f
+#define OP_PCL_SSL30_DES_CBC_SHA_5 0x0012
+#define OP_PCL_SSL30_DES_CBC_SHA_6 0x0015
+#define OP_PCL_SSL30_DES_CBC_SHA_7 0x001a
+
+#define OP_PCL_SSL30_RC4_128_MD5 0x0024
+#define OP_PCL_SSL30_RC4_128_MD5_2 0x0004
+#define OP_PCL_SSL30_RC4_128_MD5_3 0x0018
+
+#define OP_PCL_SSL30_RC4_40_MD5 0x002b
+#define OP_PCL_SSL30_RC4_40_MD5_2 0x0003
+#define OP_PCL_SSL30_RC4_40_MD5_3 0x0017
+
+#define OP_PCL_SSL30_RC4_128_SHA 0x0020
+#define OP_PCL_SSL30_RC4_128_SHA_2 0x008a
+#define OP_PCL_SSL30_RC4_128_SHA_3 0x008e
+#define OP_PCL_SSL30_RC4_128_SHA_4 0x0092
+#define OP_PCL_SSL30_RC4_128_SHA_5 0x0005
+#define OP_PCL_SSL30_RC4_128_SHA_6 0xc002
+#define OP_PCL_SSL30_RC4_128_SHA_7 0xc007
+#define OP_PCL_SSL30_RC4_128_SHA_8 0xc00c
+#define OP_PCL_SSL30_RC4_128_SHA_9 0xc011
+#define OP_PCL_SSL30_RC4_128_SHA_10 0xc016
+
+#define OP_PCL_SSL30_RC4_40_SHA 0x0028
+
+/* For TLS 1.0 - OP_PCLID_TLS10 */
+#define OP_PCL_TLS10_AES_128_CBC_SHA 0x002f
+#define OP_PCL_TLS10_AES_128_CBC_SHA_2 0x0030
+#define OP_PCL_TLS10_AES_128_CBC_SHA_3 0x0031
+#define OP_PCL_TLS10_AES_128_CBC_SHA_4 0x0032
+#define OP_PCL_TLS10_AES_128_CBC_SHA_5 0x0033
+#define OP_PCL_TLS10_AES_128_CBC_SHA_6 0x0034
+#define OP_PCL_TLS10_AES_128_CBC_SHA_7 0x008c
+#define OP_PCL_TLS10_AES_128_CBC_SHA_8 0x0090
+#define OP_PCL_TLS10_AES_128_CBC_SHA_9 0x0094
+#define OP_PCL_TLS10_AES_128_CBC_SHA_10 0xc004
+#define OP_PCL_TLS10_AES_128_CBC_SHA_11 0xc009
+#define OP_PCL_TLS10_AES_128_CBC_SHA_12 0xc00e
+#define OP_PCL_TLS10_AES_128_CBC_SHA_13 0xc013
+#define OP_PCL_TLS10_AES_128_CBC_SHA_14 0xc018
+#define OP_PCL_TLS10_AES_128_CBC_SHA_15 0xc01d
+#define OP_PCL_TLS10_AES_128_CBC_SHA_16 0xc01e
+#define OP_PCL_TLS10_AES_128_CBC_SHA_17 0xc01f
+
+#define OP_PCL_TLS10_AES_256_CBC_SHA 0x0035
+#define OP_PCL_TLS10_AES_256_CBC_SHA_2 0x0036
+#define OP_PCL_TLS10_AES_256_CBC_SHA_3 0x0037
+#define OP_PCL_TLS10_AES_256_CBC_SHA_4 0x0038
+#define OP_PCL_TLS10_AES_256_CBC_SHA_5 0x0039
+#define OP_PCL_TLS10_AES_256_CBC_SHA_6 0x003a
+#define OP_PCL_TLS10_AES_256_CBC_SHA_7 0x008d
+#define OP_PCL_TLS10_AES_256_CBC_SHA_8 0x0091
+#define OP_PCL_TLS10_AES_256_CBC_SHA_9 0x0095
+#define OP_PCL_TLS10_AES_256_CBC_SHA_10 0xc005
+#define OP_PCL_TLS10_AES_256_CBC_SHA_11 0xc00a
+#define OP_PCL_TLS10_AES_256_CBC_SHA_12 0xc00f
+#define OP_PCL_TLS10_AES_256_CBC_SHA_13 0xc014
+#define OP_PCL_TLS10_AES_256_CBC_SHA_14 0xc019
+#define OP_PCL_TLS10_AES_256_CBC_SHA_15 0xc020
+#define OP_PCL_TLS10_AES_256_CBC_SHA_16 0xc021
+#define OP_PCL_TLS10_AES_256_CBC_SHA_17 0xc022
+
+#define OP_PCL_TLS_ECDHE_ECDSA_AES_128_CBC_SHA256 0xC023
+#define OP_PCL_TLS_ECDHE_ECDSA_AES_256_CBC_SHA384 0xC024
+#define OP_PCL_TLS_ECDH_ECDSA_AES_128_CBC_SHA256 0xC025
+#define OP_PCL_TLS_ECDH_ECDSA_AES_256_CBC_SHA384 0xC026
+#define OP_PCL_TLS_ECDHE_RSA_AES_128_CBC_SHA256 0xC027
+#define OP_PCL_TLS_ECDHE_RSA_AES_256_CBC_SHA384 0xC028
+#define OP_PCL_TLS_ECDH_RSA_AES_128_CBC_SHA256 0xC029
+#define OP_PCL_TLS_ECDH_RSA_AES_256_CBC_SHA384 0xC02A
+#define OP_PCL_TLS_ECDHE_ECDSA_AES_128_GCM_SHA256 0xC02B
+#define OP_PCL_TLS_ECDHE_ECDSA_AES_256_GCM_SHA384 0xC02C
+#define OP_PCL_TLS_ECDH_ECDSA_AES_128_GCM_SHA256 0xC02D
+#define OP_PCL_TLS_ECDH_ECDSA_AES_256_GCM_SHA384 0xC02E
+#define OP_PCL_TLS_ECDHE_RSA_AES_128_GCM_SHA256 0xC02F
+#define OP_PCL_TLS_ECDHE_RSA_AES_256_GCM_SHA384 0xC030
+#define OP_PCL_TLS_ECDH_RSA_AES_128_GCM_SHA256 0xC031
+#define OP_PCL_TLS_ECDH_RSA_AES_256_GCM_SHA384 0xC032
+#define OP_PCL_TLS_ECDHE_PSK_RC4_128_SHA 0xC033
+#define OP_PCL_TLS_ECDHE_PSK_3DES_EDE_CBC_SHA 0xC034
+#define OP_PCL_TLS_ECDHE_PSK_AES_128_CBC_SHA 0xC035
+#define OP_PCL_TLS_ECDHE_PSK_AES_256_CBC_SHA 0xC036
+#define OP_PCL_TLS_ECDHE_PSK_AES_128_CBC_SHA256 0xC037
+#define OP_PCL_TLS_ECDHE_PSK_AES_256_CBC_SHA384 0xC038
+
+/* #define OP_PCL_TLS10_3DES_EDE_CBC_MD5 0x0023 */
+
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA 0x001f
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_2 0x008b
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_3 0x008f
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_4 0x0093
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_5 0x000a
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_6 0x000d
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_7 0x0010
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_8 0x0013
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_9 0x0016
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_10 0x001b
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_11 0xc003
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_12 0xc008
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_13 0xc00d
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_14 0xc012
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_15 0xc017
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_16 0xc01a
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_17 0xc01b
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_18 0xc01c
+
+#define OP_PCL_TLS10_DES40_CBC_MD5 0x0029
+
+#define OP_PCL_TLS10_DES_CBC_MD5 0x0022
+
+#define OP_PCL_TLS10_DES40_CBC_SHA 0x0008
+#define OP_PCL_TLS10_DES40_CBC_SHA_2 0x000b
+#define OP_PCL_TLS10_DES40_CBC_SHA_3 0x000e
+#define OP_PCL_TLS10_DES40_CBC_SHA_4 0x0011
+#define OP_PCL_TLS10_DES40_CBC_SHA_5 0x0014
+#define OP_PCL_TLS10_DES40_CBC_SHA_6 0x0019
+#define OP_PCL_TLS10_DES40_CBC_SHA_7 0x0026
+
+#define OP_PCL_TLS10_DES_CBC_SHA 0x001e
+#define OP_PCL_TLS10_DES_CBC_SHA_2 0x0009
+#define OP_PCL_TLS10_DES_CBC_SHA_3 0x000c
+#define OP_PCL_TLS10_DES_CBC_SHA_4 0x000f
+#define OP_PCL_TLS10_DES_CBC_SHA_5 0x0012
+#define OP_PCL_TLS10_DES_CBC_SHA_6 0x0015
+#define OP_PCL_TLS10_DES_CBC_SHA_7 0x001a
+
+#define OP_PCL_TLS10_RC4_128_MD5 0x0024
+#define OP_PCL_TLS10_RC4_128_MD5_2 0x0004
+#define OP_PCL_TLS10_RC4_128_MD5_3 0x0018
+
+#define OP_PCL_TLS10_RC4_40_MD5 0x002b
+#define OP_PCL_TLS10_RC4_40_MD5_2 0x0003
+#define OP_PCL_TLS10_RC4_40_MD5_3 0x0017
+
+#define OP_PCL_TLS10_RC4_128_SHA 0x0020
+#define OP_PCL_TLS10_RC4_128_SHA_2 0x008a
+#define OP_PCL_TLS10_RC4_128_SHA_3 0x008e
+#define OP_PCL_TLS10_RC4_128_SHA_4 0x0092
+#define OP_PCL_TLS10_RC4_128_SHA_5 0x0005
+#define OP_PCL_TLS10_RC4_128_SHA_6 0xc002
+#define OP_PCL_TLS10_RC4_128_SHA_7 0xc007
+#define OP_PCL_TLS10_RC4_128_SHA_8 0xc00c
+#define OP_PCL_TLS10_RC4_128_SHA_9 0xc011
+#define OP_PCL_TLS10_RC4_128_SHA_10 0xc016
+
+#define OP_PCL_TLS10_RC4_40_SHA 0x0028
+
+#define OP_PCL_TLS10_3DES_EDE_CBC_MD5 0xff23
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA160 0xff30
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA224 0xff34
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA256 0xff36
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA384 0xff33
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA512 0xff35
+#define OP_PCL_TLS10_AES_128_CBC_SHA160 0xff80
+#define OP_PCL_TLS10_AES_128_CBC_SHA224 0xff84
+#define OP_PCL_TLS10_AES_128_CBC_SHA256 0xff86
+#define OP_PCL_TLS10_AES_128_CBC_SHA384 0xff83
+#define OP_PCL_TLS10_AES_128_CBC_SHA512 0xff85
+#define OP_PCL_TLS10_AES_192_CBC_SHA160 0xff20
+#define OP_PCL_TLS10_AES_192_CBC_SHA224 0xff24
+#define OP_PCL_TLS10_AES_192_CBC_SHA256 0xff26
+#define OP_PCL_TLS10_AES_192_CBC_SHA384 0xff23
+#define OP_PCL_TLS10_AES_192_CBC_SHA512 0xff25
+#define OP_PCL_TLS10_AES_256_CBC_SHA160 0xff60
+#define OP_PCL_TLS10_AES_256_CBC_SHA224 0xff64
+#define OP_PCL_TLS10_AES_256_CBC_SHA256 0xff66
+#define OP_PCL_TLS10_AES_256_CBC_SHA384 0xff63
+#define OP_PCL_TLS10_AES_256_CBC_SHA512 0xff65
+
+#define OP_PCL_TLS_PVT_AES_192_CBC_SHA160 0xff90
+#define OP_PCL_TLS_PVT_AES_192_CBC_SHA384 0xff93
+#define OP_PCL_TLS_PVT_AES_192_CBC_SHA224 0xff94
+#define OP_PCL_TLS_PVT_AES_192_CBC_SHA512 0xff95
+#define OP_PCL_TLS_PVT_AES_192_CBC_SHA256 0xff96
+#define OP_PCL_TLS_PVT_MASTER_SECRET_PRF_FE 0xfffe
+#define OP_PCL_TLS_PVT_MASTER_SECRET_PRF_FF 0xffff
+
+/* For TLS 1.1 - OP_PCLID_TLS11 */
+#define OP_PCL_TLS11_AES_128_CBC_SHA 0x002f
+#define OP_PCL_TLS11_AES_128_CBC_SHA_2 0x0030
+#define OP_PCL_TLS11_AES_128_CBC_SHA_3 0x0031
+#define OP_PCL_TLS11_AES_128_CBC_SHA_4 0x0032
+#define OP_PCL_TLS11_AES_128_CBC_SHA_5 0x0033
+#define OP_PCL_TLS11_AES_128_CBC_SHA_6 0x0034
+#define OP_PCL_TLS11_AES_128_CBC_SHA_7 0x008c
+#define OP_PCL_TLS11_AES_128_CBC_SHA_8 0x0090
+#define OP_PCL_TLS11_AES_128_CBC_SHA_9 0x0094
+#define OP_PCL_TLS11_AES_128_CBC_SHA_10 0xc004
+#define OP_PCL_TLS11_AES_128_CBC_SHA_11 0xc009
+#define OP_PCL_TLS11_AES_128_CBC_SHA_12 0xc00e
+#define OP_PCL_TLS11_AES_128_CBC_SHA_13 0xc013
+#define OP_PCL_TLS11_AES_128_CBC_SHA_14 0xc018
+#define OP_PCL_TLS11_AES_128_CBC_SHA_15 0xc01d
+#define OP_PCL_TLS11_AES_128_CBC_SHA_16 0xc01e
+#define OP_PCL_TLS11_AES_128_CBC_SHA_17 0xc01f
+
+#define OP_PCL_TLS11_AES_256_CBC_SHA 0x0035
+#define OP_PCL_TLS11_AES_256_CBC_SHA_2 0x0036
+#define OP_PCL_TLS11_AES_256_CBC_SHA_3 0x0037
+#define OP_PCL_TLS11_AES_256_CBC_SHA_4 0x0038
+#define OP_PCL_TLS11_AES_256_CBC_SHA_5 0x0039
+#define OP_PCL_TLS11_AES_256_CBC_SHA_6 0x003a
+#define OP_PCL_TLS11_AES_256_CBC_SHA_7 0x008d
+#define OP_PCL_TLS11_AES_256_CBC_SHA_8 0x0091
+#define OP_PCL_TLS11_AES_256_CBC_SHA_9 0x0095
+#define OP_PCL_TLS11_AES_256_CBC_SHA_10 0xc005
+#define OP_PCL_TLS11_AES_256_CBC_SHA_11 0xc00a
+#define OP_PCL_TLS11_AES_256_CBC_SHA_12 0xc00f
+#define OP_PCL_TLS11_AES_256_CBC_SHA_13 0xc014
+#define OP_PCL_TLS11_AES_256_CBC_SHA_14 0xc019
+#define OP_PCL_TLS11_AES_256_CBC_SHA_15 0xc020
+#define OP_PCL_TLS11_AES_256_CBC_SHA_16 0xc021
+#define OP_PCL_TLS11_AES_256_CBC_SHA_17 0xc022
+
+/* #define OP_PCL_TLS11_3DES_EDE_CBC_MD5 0x0023 */
+
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA 0x001f
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_2 0x008b
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_3 0x008f
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_4 0x0093
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_5 0x000a
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_6 0x000d
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_7 0x0010
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_8 0x0013
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_9 0x0016
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_10 0x001b
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_11 0xc003
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_12 0xc008
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_13 0xc00d
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_14 0xc012
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_15 0xc017
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_16 0xc01a
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_17 0xc01b
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_18 0xc01c
+
+#define OP_PCL_TLS11_DES40_CBC_MD5 0x0029
+
+#define OP_PCL_TLS11_DES_CBC_MD5 0x0022
+
+#define OP_PCL_TLS11_DES40_CBC_SHA 0x0008
+#define OP_PCL_TLS11_DES40_CBC_SHA_2 0x000b
+#define OP_PCL_TLS11_DES40_CBC_SHA_3 0x000e
+#define OP_PCL_TLS11_DES40_CBC_SHA_4 0x0011
+#define OP_PCL_TLS11_DES40_CBC_SHA_5 0x0014
+#define OP_PCL_TLS11_DES40_CBC_SHA_6 0x0019
+#define OP_PCL_TLS11_DES40_CBC_SHA_7 0x0026
+
+#define OP_PCL_TLS11_DES_CBC_SHA 0x001e
+#define OP_PCL_TLS11_DES_CBC_SHA_2 0x0009
+#define OP_PCL_TLS11_DES_CBC_SHA_3 0x000c
+#define OP_PCL_TLS11_DES_CBC_SHA_4 0x000f
+#define OP_PCL_TLS11_DES_CBC_SHA_5 0x0012
+#define OP_PCL_TLS11_DES_CBC_SHA_6 0x0015
+#define OP_PCL_TLS11_DES_CBC_SHA_7 0x001a
+
+#define OP_PCL_TLS11_RC4_128_MD5 0x0024
+#define OP_PCL_TLS11_RC4_128_MD5_2 0x0004
+#define OP_PCL_TLS11_RC4_128_MD5_3 0x0018
+
+#define OP_PCL_TLS11_RC4_40_MD5 0x002b
+#define OP_PCL_TLS11_RC4_40_MD5_2 0x0003
+#define OP_PCL_TLS11_RC4_40_MD5_3 0x0017
+
+#define OP_PCL_TLS11_RC4_128_SHA 0x0020
+#define OP_PCL_TLS11_RC4_128_SHA_2 0x008a
+#define OP_PCL_TLS11_RC4_128_SHA_3 0x008e
+#define OP_PCL_TLS11_RC4_128_SHA_4 0x0092
+#define OP_PCL_TLS11_RC4_128_SHA_5 0x0005
+#define OP_PCL_TLS11_RC4_128_SHA_6 0xc002
+#define OP_PCL_TLS11_RC4_128_SHA_7 0xc007
+#define OP_PCL_TLS11_RC4_128_SHA_8 0xc00c
+#define OP_PCL_TLS11_RC4_128_SHA_9 0xc011
+#define OP_PCL_TLS11_RC4_128_SHA_10 0xc016
+
+#define OP_PCL_TLS11_RC4_40_SHA 0x0028
+
+#define OP_PCL_TLS11_3DES_EDE_CBC_MD5 0xff23
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA160 0xff30
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA224 0xff34
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA256 0xff36
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA384 0xff33
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA512 0xff35
+#define OP_PCL_TLS11_AES_128_CBC_SHA160 0xff80
+#define OP_PCL_TLS11_AES_128_CBC_SHA224 0xff84
+#define OP_PCL_TLS11_AES_128_CBC_SHA256 0xff86
+#define OP_PCL_TLS11_AES_128_CBC_SHA384 0xff83
+#define OP_PCL_TLS11_AES_128_CBC_SHA512 0xff85
+#define OP_PCL_TLS11_AES_192_CBC_SHA160 0xff20
+#define OP_PCL_TLS11_AES_192_CBC_SHA224 0xff24
+#define OP_PCL_TLS11_AES_192_CBC_SHA256 0xff26
+#define OP_PCL_TLS11_AES_192_CBC_SHA384 0xff23
+#define OP_PCL_TLS11_AES_192_CBC_SHA512 0xff25
+#define OP_PCL_TLS11_AES_256_CBC_SHA160 0xff60
+#define OP_PCL_TLS11_AES_256_CBC_SHA224 0xff64
+#define OP_PCL_TLS11_AES_256_CBC_SHA256 0xff66
+#define OP_PCL_TLS11_AES_256_CBC_SHA384 0xff63
+#define OP_PCL_TLS11_AES_256_CBC_SHA512 0xff65
+
+
+/* For TLS 1.2 - OP_PCLID_TLS12 */
+#define OP_PCL_TLS12_AES_128_CBC_SHA 0x002f
+#define OP_PCL_TLS12_AES_128_CBC_SHA_2 0x0030
+#define OP_PCL_TLS12_AES_128_CBC_SHA_3 0x0031
+#define OP_PCL_TLS12_AES_128_CBC_SHA_4 0x0032
+#define OP_PCL_TLS12_AES_128_CBC_SHA_5 0x0033
+#define OP_PCL_TLS12_AES_128_CBC_SHA_6 0x0034
+#define OP_PCL_TLS12_AES_128_CBC_SHA_7 0x008c
+#define OP_PCL_TLS12_AES_128_CBC_SHA_8 0x0090
+#define OP_PCL_TLS12_AES_128_CBC_SHA_9 0x0094
+#define OP_PCL_TLS12_AES_128_CBC_SHA_10 0xc004
+#define OP_PCL_TLS12_AES_128_CBC_SHA_11 0xc009
+#define OP_PCL_TLS12_AES_128_CBC_SHA_12 0xc00e
+#define OP_PCL_TLS12_AES_128_CBC_SHA_13 0xc013
+#define OP_PCL_TLS12_AES_128_CBC_SHA_14 0xc018
+#define OP_PCL_TLS12_AES_128_CBC_SHA_15 0xc01d
+#define OP_PCL_TLS12_AES_128_CBC_SHA_16 0xc01e
+#define OP_PCL_TLS12_AES_128_CBC_SHA_17 0xc01f
+
+#define OP_PCL_TLS12_AES_256_CBC_SHA 0x0035
+#define OP_PCL_TLS12_AES_256_CBC_SHA_2 0x0036
+#define OP_PCL_TLS12_AES_256_CBC_SHA_3 0x0037
+#define OP_PCL_TLS12_AES_256_CBC_SHA_4 0x0038
+#define OP_PCL_TLS12_AES_256_CBC_SHA_5 0x0039
+#define OP_PCL_TLS12_AES_256_CBC_SHA_6 0x003a
+#define OP_PCL_TLS12_AES_256_CBC_SHA_7 0x008d
+#define OP_PCL_TLS12_AES_256_CBC_SHA_8 0x0091
+#define OP_PCL_TLS12_AES_256_CBC_SHA_9 0x0095
+#define OP_PCL_TLS12_AES_256_CBC_SHA_10 0xc005
+#define OP_PCL_TLS12_AES_256_CBC_SHA_11 0xc00a
+#define OP_PCL_TLS12_AES_256_CBC_SHA_12 0xc00f
+#define OP_PCL_TLS12_AES_256_CBC_SHA_13 0xc014
+#define OP_PCL_TLS12_AES_256_CBC_SHA_14 0xc019
+#define OP_PCL_TLS12_AES_256_CBC_SHA_15 0xc020
+#define OP_PCL_TLS12_AES_256_CBC_SHA_16 0xc021
+#define OP_PCL_TLS12_AES_256_CBC_SHA_17 0xc022
+
+/* #define OP_PCL_TLS12_3DES_EDE_CBC_MD5 0x0023 */
+
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA 0x001f
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_2 0x008b
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_3 0x008f
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_4 0x0093
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_5 0x000a
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_6 0x000d
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_7 0x0010
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_8 0x0013
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_9 0x0016
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_10 0x001b
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_11 0xc003
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_12 0xc008
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_13 0xc00d
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_14 0xc012
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_15 0xc017
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_16 0xc01a
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_17 0xc01b
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_18 0xc01c
+
+#define OP_PCL_TLS12_DES40_CBC_MD5 0x0029
+
+#define OP_PCL_TLS12_DES_CBC_MD5 0x0022
+
+#define OP_PCL_TLS12_DES40_CBC_SHA 0x0008
+#define OP_PCL_TLS12_DES40_CBC_SHA_2 0x000b
+#define OP_PCL_TLS12_DES40_CBC_SHA_3 0x000e
+#define OP_PCL_TLS12_DES40_CBC_SHA_4 0x0011
+#define OP_PCL_TLS12_DES40_CBC_SHA_5 0x0014
+#define OP_PCL_TLS12_DES40_CBC_SHA_6 0x0019
+#define OP_PCL_TLS12_DES40_CBC_SHA_7 0x0026
+
+#define OP_PCL_TLS12_DES_CBC_SHA 0x001e
+#define OP_PCL_TLS12_DES_CBC_SHA_2 0x0009
+#define OP_PCL_TLS12_DES_CBC_SHA_3 0x000c
+#define OP_PCL_TLS12_DES_CBC_SHA_4 0x000f
+#define OP_PCL_TLS12_DES_CBC_SHA_5 0x0012
+#define OP_PCL_TLS12_DES_CBC_SHA_6 0x0015
+#define OP_PCL_TLS12_DES_CBC_SHA_7 0x001a
+
+#define OP_PCL_TLS12_RC4_128_MD5 0x0024
+#define OP_PCL_TLS12_RC4_128_MD5_2 0x0004
+#define OP_PCL_TLS12_RC4_128_MD5_3 0x0018
+
+#define OP_PCL_TLS12_RC4_40_MD5 0x002b
+#define OP_PCL_TLS12_RC4_40_MD5_2 0x0003
+#define OP_PCL_TLS12_RC4_40_MD5_3 0x0017
+
+#define OP_PCL_TLS12_RC4_128_SHA 0x0020
+#define OP_PCL_TLS12_RC4_128_SHA_2 0x008a
+#define OP_PCL_TLS12_RC4_128_SHA_3 0x008e
+#define OP_PCL_TLS12_RC4_128_SHA_4 0x0092
+#define OP_PCL_TLS12_RC4_128_SHA_5 0x0005
+#define OP_PCL_TLS12_RC4_128_SHA_6 0xc002
+#define OP_PCL_TLS12_RC4_128_SHA_7 0xc007
+#define OP_PCL_TLS12_RC4_128_SHA_8 0xc00c
+#define OP_PCL_TLS12_RC4_128_SHA_9 0xc011
+#define OP_PCL_TLS12_RC4_128_SHA_10 0xc016
+
+#define OP_PCL_TLS12_RC4_40_SHA 0x0028
+
+/* #define OP_PCL_TLS12_AES_128_CBC_SHA256 0x003c */
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_2 0x003e
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_3 0x003f
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_4 0x0040
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_5 0x0067
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_6 0x006c
+
+/* #define OP_PCL_TLS12_AES_256_CBC_SHA256 0x003d */
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_2 0x0068
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_3 0x0069
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_4 0x006a
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_5 0x006b
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_6 0x006d
+
+/* AEAD_AES_xxx_CCM/GCM remain to be defined... */
+
+#define OP_PCL_TLS12_3DES_EDE_CBC_MD5 0xff23
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA160 0xff30
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA224 0xff34
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA256 0xff36
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA384 0xff33
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA512 0xff35
+#define OP_PCL_TLS12_AES_128_CBC_SHA160 0xff80
+#define OP_PCL_TLS12_AES_128_CBC_SHA224 0xff84
+#define OP_PCL_TLS12_AES_128_CBC_SHA256 0xff86
+#define OP_PCL_TLS12_AES_128_CBC_SHA384 0xff83
+#define OP_PCL_TLS12_AES_128_CBC_SHA512 0xff85
+#define OP_PCL_TLS12_AES_192_CBC_SHA160 0xff20
+#define OP_PCL_TLS12_AES_192_CBC_SHA224 0xff24
+#define OP_PCL_TLS12_AES_192_CBC_SHA256 0xff26
+#define OP_PCL_TLS12_AES_192_CBC_SHA384 0xff23
+#define OP_PCL_TLS12_AES_192_CBC_SHA512 0xff25
+#define OP_PCL_TLS12_AES_256_CBC_SHA160 0xff60
+#define OP_PCL_TLS12_AES_256_CBC_SHA224 0xff64
+#define OP_PCL_TLS12_AES_256_CBC_SHA256 0xff66
+#define OP_PCL_TLS12_AES_256_CBC_SHA384 0xff63
+#define OP_PCL_TLS12_AES_256_CBC_SHA512 0xff65
+
+/* For DTLS - OP_PCLID_DTLS10 */
+
+#define OP_PCL_DTLS_AES_128_CBC_SHA 0x002f
+#define OP_PCL_DTLS_AES_128_CBC_SHA_2 0x0030
+#define OP_PCL_DTLS_AES_128_CBC_SHA_3 0x0031
+#define OP_PCL_DTLS_AES_128_CBC_SHA_4 0x0032
+#define OP_PCL_DTLS_AES_128_CBC_SHA_5 0x0033
+#define OP_PCL_DTLS_AES_128_CBC_SHA_6 0x0034
+#define OP_PCL_DTLS_AES_128_CBC_SHA_7 0x008c
+#define OP_PCL_DTLS_AES_128_CBC_SHA_8 0x0090
+#define OP_PCL_DTLS_AES_128_CBC_SHA_9 0x0094
+#define OP_PCL_DTLS_AES_128_CBC_SHA_10 0xc004
+#define OP_PCL_DTLS_AES_128_CBC_SHA_11 0xc009
+#define OP_PCL_DTLS_AES_128_CBC_SHA_12 0xc00e
+#define OP_PCL_DTLS_AES_128_CBC_SHA_13 0xc013
+#define OP_PCL_DTLS_AES_128_CBC_SHA_14 0xc018
+#define OP_PCL_DTLS_AES_128_CBC_SHA_15 0xc01d
+#define OP_PCL_DTLS_AES_128_CBC_SHA_16 0xc01e
+#define OP_PCL_DTLS_AES_128_CBC_SHA_17 0xc01f
+
+#define OP_PCL_DTLS_AES_256_CBC_SHA 0x0035
+#define OP_PCL_DTLS_AES_256_CBC_SHA_2 0x0036
+#define OP_PCL_DTLS_AES_256_CBC_SHA_3 0x0037
+#define OP_PCL_DTLS_AES_256_CBC_SHA_4 0x0038
+#define OP_PCL_DTLS_AES_256_CBC_SHA_5 0x0039
+#define OP_PCL_DTLS_AES_256_CBC_SHA_6 0x003a
+#define OP_PCL_DTLS_AES_256_CBC_SHA_7 0x008d
+#define OP_PCL_DTLS_AES_256_CBC_SHA_8 0x0091
+#define OP_PCL_DTLS_AES_256_CBC_SHA_9 0x0095
+#define OP_PCL_DTLS_AES_256_CBC_SHA_10 0xc005
+#define OP_PCL_DTLS_AES_256_CBC_SHA_11 0xc00a
+#define OP_PCL_DTLS_AES_256_CBC_SHA_12 0xc00f
+#define OP_PCL_DTLS_AES_256_CBC_SHA_13 0xc014
+#define OP_PCL_DTLS_AES_256_CBC_SHA_14 0xc019
+#define OP_PCL_DTLS_AES_256_CBC_SHA_15 0xc020
+#define OP_PCL_DTLS_AES_256_CBC_SHA_16 0xc021
+#define OP_PCL_DTLS_AES_256_CBC_SHA_17 0xc022
+
+/* #define OP_PCL_DTLS_3DES_EDE_CBC_MD5 0x0023 */
+
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA 0x001f
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_2 0x008b
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_3 0x008f
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_4 0x0093
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_5 0x000a
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_6 0x000d
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_7 0x0010
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_8 0x0013
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_9 0x0016
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_10 0x001b
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_11 0xc003
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_12 0xc008
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_13 0xc00d
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_14 0xc012
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_15 0xc017
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_16 0xc01a
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_17 0xc01b
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_18 0xc01c
+
+#define OP_PCL_DTLS_DES40_CBC_MD5 0x0029
+
+#define OP_PCL_DTLS_DES_CBC_MD5 0x0022
+
+#define OP_PCL_DTLS_DES40_CBC_SHA 0x0008
+#define OP_PCL_DTLS_DES40_CBC_SHA_2 0x000b
+#define OP_PCL_DTLS_DES40_CBC_SHA_3 0x000e
+#define OP_PCL_DTLS_DES40_CBC_SHA_4 0x0011
+#define OP_PCL_DTLS_DES40_CBC_SHA_5 0x0014
+#define OP_PCL_DTLS_DES40_CBC_SHA_6 0x0019
+#define OP_PCL_DTLS_DES40_CBC_SHA_7 0x0026
+
+
+#define OP_PCL_DTLS_DES_CBC_SHA 0x001e
+#define OP_PCL_DTLS_DES_CBC_SHA_2 0x0009
+#define OP_PCL_DTLS_DES_CBC_SHA_3 0x000c
+#define OP_PCL_DTLS_DES_CBC_SHA_4 0x000f
+#define OP_PCL_DTLS_DES_CBC_SHA_5 0x0012
+#define OP_PCL_DTLS_DES_CBC_SHA_6 0x0015
+#define OP_PCL_DTLS_DES_CBC_SHA_7 0x001a
+
+#define OP_PCL_DTLS_3DES_EDE_CBC_MD5 0xff23
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA160 0xff30
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA224 0xff34
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA256 0xff36
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA384 0xff33
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA512 0xff35
+#define OP_PCL_DTLS_AES_128_CBC_SHA160 0xff80
+#define OP_PCL_DTLS_AES_128_CBC_SHA224 0xff84
+#define OP_PCL_DTLS_AES_128_CBC_SHA256 0xff86
+#define OP_PCL_DTLS_AES_128_CBC_SHA384 0xff83
+#define OP_PCL_DTLS_AES_128_CBC_SHA512 0xff85
+#define OP_PCL_DTLS_AES_192_CBC_SHA160 0xff20
+#define OP_PCL_DTLS_AES_192_CBC_SHA224 0xff24
+#define OP_PCL_DTLS_AES_192_CBC_SHA256 0xff26
+#define OP_PCL_DTLS_AES_192_CBC_SHA384 0xff23
+#define OP_PCL_DTLS_AES_192_CBC_SHA512 0xff25
+#define OP_PCL_DTLS_AES_256_CBC_SHA160 0xff60
+#define OP_PCL_DTLS_AES_256_CBC_SHA224 0xff64
+#define OP_PCL_DTLS_AES_256_CBC_SHA256 0xff66
+#define OP_PCL_DTLS_AES_256_CBC_SHA384 0xff63
+#define OP_PCL_DTLS_AES_256_CBC_SHA512 0xff65
+
+/* 802.16 WiMAX protinfos */
+#define OP_PCL_WIMAX_OFDM 0x0201
+#define OP_PCL_WIMAX_OFDMA 0x0231
+
+/* 802.11 WiFi protinfos */
+#define OP_PCL_WIFI 0xac04
+
+/* MacSec protinfos */
+#define OP_PCL_MACSEC 0x0001
+
+/* 3G DCRC protinfos */
+#define OP_PCL_3G_DCRC_CRC7 0x0710
+#define OP_PCL_3G_DCRC_CRC11 0x0B10
+
+/* 3G RLC protinfos */
+#define OP_PCL_3G_RLC_NULL 0x0000
+#define OP_PCL_3G_RLC_KASUMI 0x0001
+#define OP_PCL_3G_RLC_SNOW 0x0002
+
+/* LTE protinfos */
+#define OP_PCL_LTE_NULL 0x0000
+#define OP_PCL_LTE_SNOW 0x0001
+#define OP_PCL_LTE_AES 0x0002
+#define OP_PCL_LTE_ZUC 0x0003
+
+/* LTE mixed protinfos */
+#define OP_PCL_LTE_MIXED_AUTH_SHIFT 0
+#define OP_PCL_LTE_MIXED_AUTH_MASK (3 << OP_PCL_LTE_MIXED_AUTH_SHIFT)
+#define OP_PCL_LTE_MIXED_ENC_SHIFT 8
+#define OP_PCL_LTE_MIXED_ENC_MASK (3 << OP_PCL_LTE_MIXED_ENC_SHIFT)
+#define OP_PCL_LTE_MIXED_AUTH_NULL (OP_PCL_LTE_NULL << \
+ OP_PCL_LTE_MIXED_AUTH_SHIFT)
+#define OP_PCL_LTE_MIXED_AUTH_SNOW (OP_PCL_LTE_SNOW << \
+ OP_PCL_LTE_MIXED_AUTH_SHIFT)
+#define OP_PCL_LTE_MIXED_AUTH_AES (OP_PCL_LTE_AES << \
+ OP_PCL_LTE_MIXED_AUTH_SHIFT)
+#define OP_PCL_LTE_MIXED_AUTH_ZUC (OP_PCL_LTE_ZUC << \
+ OP_PCL_LTE_MIXED_AUTH_SHIFT)
+#define OP_PCL_LTE_MIXED_ENC_NULL (OP_PCL_LTE_NULL << \
+ OP_PCL_LTE_MIXED_ENC_SHIFT)
+#define OP_PCL_LTE_MIXED_ENC_SNOW (OP_PCL_LTE_SNOW << \
+ OP_PCL_LTE_MIXED_ENC_SHIFT)
+#define OP_PCL_LTE_MIXED_ENC_AES (OP_PCL_LTE_AES << \
+ OP_PCL_LTE_MIXED_ENC_SHIFT)
+#define OP_PCL_LTE_MIXED_ENC_ZUC (OP_PCL_LTE_ZUC << \
+ OP_PCL_LTE_MIXED_ENC_SHIFT)
+
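+/*
+ * Illustrative sketch (not from the original header): a mixed LTE PDCP
+ * protinfo for AES ciphering with SNOW f9 authentication takes one
+ * value from each field:
+ *
+ *	uint16_t protinfo = OP_PCL_LTE_MIXED_ENC_AES |
+ *			    OP_PCL_LTE_MIXED_AUTH_SNOW;
+ */
+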
+/* PKI unidirectional protocol protinfo bits */
+#define OP_PCL_PKPROT_DSA_MSG BIT(10)
+#define OP_PCL_PKPROT_HASH_SHIFT 7
+#define OP_PCL_PKPROT_HASH_MASK (7 << OP_PCL_PKPROT_HASH_SHIFT)
+#define OP_PCL_PKPROT_HASH_MD5 (0 << OP_PCL_PKPROT_HASH_SHIFT)
+#define OP_PCL_PKPROT_HASH_SHA1 (1 << OP_PCL_PKPROT_HASH_SHIFT)
+#define OP_PCL_PKPROT_HASH_SHA224 (2 << OP_PCL_PKPROT_HASH_SHIFT)
+#define OP_PCL_PKPROT_HASH_SHA256 (3 << OP_PCL_PKPROT_HASH_SHIFT)
+#define OP_PCL_PKPROT_HASH_SHA384 (4 << OP_PCL_PKPROT_HASH_SHIFT)
+#define OP_PCL_PKPROT_HASH_SHA512 (5 << OP_PCL_PKPROT_HASH_SHIFT)
+#define OP_PCL_PKPROT_EKT_Z BIT(6)
+#define OP_PCL_PKPROT_DECRYPT_Z BIT(5)
+#define OP_PCL_PKPROT_EKT_PRI BIT(4)
+#define OP_PCL_PKPROT_TEST BIT(3)
+#define OP_PCL_PKPROT_DECRYPT_PRI BIT(2)
+#define OP_PCL_PKPROT_ECC BIT(1)
+#define OP_PCL_PKPROT_F2M BIT(0)
+
+/* Blob protinfos */
+#define OP_PCL_BLOB_TKEK_SHIFT 9
+#define OP_PCL_BLOB_TKEK BIT(9)
+#define OP_PCL_BLOB_EKT_SHIFT 8
+#define OP_PCL_BLOB_EKT BIT(8)
+#define OP_PCL_BLOB_REG_SHIFT 4
+#define OP_PCL_BLOB_REG_MASK (0xF << OP_PCL_BLOB_REG_SHIFT)
+#define OP_PCL_BLOB_REG_MEMORY (0x0 << OP_PCL_BLOB_REG_SHIFT)
+#define OP_PCL_BLOB_REG_KEY1 (0x1 << OP_PCL_BLOB_REG_SHIFT)
+#define OP_PCL_BLOB_REG_KEY2 (0x3 << OP_PCL_BLOB_REG_SHIFT)
+#define OP_PCL_BLOB_AFHA_SBOX (0x5 << OP_PCL_BLOB_REG_SHIFT)
+#define OP_PCL_BLOB_REG_SPLIT (0x7 << OP_PCL_BLOB_REG_SHIFT)
+#define OP_PCL_BLOB_REG_PKE (0x9 << OP_PCL_BLOB_REG_SHIFT)
+#define OP_PCL_BLOB_SEC_MEM_SHIFT 3
+#define OP_PCL_BLOB_SEC_MEM BIT(3)
+#define OP_PCL_BLOB_BLACK BIT(2)
+#define OP_PCL_BLOB_FORMAT_SHIFT 0
+#define OP_PCL_BLOB_FORMAT_MASK 0x3
+#define OP_PCL_BLOB_FORMAT_NORMAL 0
+#define OP_PCL_BLOB_FORMAT_MASTER_VER 2
+#define OP_PCL_BLOB_FORMAT_TEST 3
+
+/* IKE / IKEv2 protinfos */
+#define OP_PCL_IKE_HMAC_MD5 0x0100
+#define OP_PCL_IKE_HMAC_SHA1 0x0200
+#define OP_PCL_IKE_HMAC_AES128_CBC 0x0400
+#define OP_PCL_IKE_HMAC_SHA256 0x0500
+#define OP_PCL_IKE_HMAC_SHA384 0x0600
+#define OP_PCL_IKE_HMAC_SHA512 0x0700
+#define OP_PCL_IKE_HMAC_AES128_CMAC 0x0800
+
+/* PKI unidirectional protocol protinfo bits */
+#define OP_PCL_PKPROT_TEST BIT(3)
+#define OP_PCL_PKPROT_DECRYPT BIT(2)
+#define OP_PCL_PKPROT_ECC BIT(1)
+#define OP_PCL_PKPROT_F2M BIT(0)
+
+/* RSA Protinfo */
+#define OP_PCL_RSAPROT_OP_MASK 3
+#define OP_PCL_RSAPROT_OP_ENC_F_IN 0
+#define OP_PCL_RSAPROT_OP_ENC_F_OUT 1
+#define OP_PCL_RSAPROT_OP_DEC_ND 0
+#define OP_PCL_RSAPROT_OP_DEC_PQD 1
+#define OP_PCL_RSAPROT_OP_DEC_PQDPDQC 2
+#define OP_PCL_RSAPROT_FFF_SHIFT 4
+#define OP_PCL_RSAPROT_FFF_MASK (7 << OP_PCL_RSAPROT_FFF_SHIFT)
+#define OP_PCL_RSAPROT_FFF_RED (0 << OP_PCL_RSAPROT_FFF_SHIFT)
+#define OP_PCL_RSAPROT_FFF_ENC (1 << OP_PCL_RSAPROT_FFF_SHIFT)
+#define OP_PCL_RSAPROT_FFF_TK_ENC (5 << OP_PCL_RSAPROT_FFF_SHIFT)
+#define OP_PCL_RSAPROT_FFF_EKT (3 << OP_PCL_RSAPROT_FFF_SHIFT)
+#define OP_PCL_RSAPROT_FFF_TK_EKT (7 << OP_PCL_RSAPROT_FFF_SHIFT)
+#define OP_PCL_RSAPROT_PPP_SHIFT 8
+#define OP_PCL_RSAPROT_PPP_MASK (7 << OP_PCL_RSAPROT_PPP_SHIFT)
+#define OP_PCL_RSAPROT_PPP_RED (0 << OP_PCL_RSAPROT_PPP_SHIFT)
+#define OP_PCL_RSAPROT_PPP_ENC (1 << OP_PCL_RSAPROT_PPP_SHIFT)
+#define OP_PCL_RSAPROT_PPP_TK_ENC (5 << OP_PCL_RSAPROT_PPP_SHIFT)
+#define OP_PCL_RSAPROT_PPP_EKT (3 << OP_PCL_RSAPROT_PPP_SHIFT)
+#define OP_PCL_RSAPROT_PPP_TK_EKT (7 << OP_PCL_RSAPROT_PPP_SHIFT)
+#define OP_PCL_RSAPROT_FMT_PKCSV15 BIT(12)
+
+/* Derived Key Protocol (DKP) Protinfo */
+#define OP_PCL_DKP_SRC_SHIFT 14
+#define OP_PCL_DKP_SRC_MASK (3 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_IMM (0 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_SEQ (1 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_PTR (2 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_SRC_SGF (3 << OP_PCL_DKP_SRC_SHIFT)
+#define OP_PCL_DKP_DST_SHIFT 12
+#define OP_PCL_DKP_DST_MASK (3 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_IMM (0 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_SEQ (1 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_PTR (2 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_DST_SGF (3 << OP_PCL_DKP_DST_SHIFT)
+#define OP_PCL_DKP_KEY_SHIFT 0
+#define OP_PCL_DKP_KEY_MASK (0xfff << OP_PCL_DKP_KEY_SHIFT)
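+
+/*
+ * Illustrative sketch (not from the original header): a DKP (split key
+ * derivation) OPERATION word for SHA-256 with an immediate input key
+ * and an immediate output key; keylen is a hypothetical variable
+ * holding the key length in bytes:
+ *
+ *	uint32_t op = OP_TYPE_UNI_PROTOCOL | OP_PCLID_DKP_SHA256 |
+ *		      OP_PCL_DKP_SRC_IMM | OP_PCL_DKP_DST_IMM |
+ *		      (keylen & OP_PCL_DKP_KEY_MASK);
+ */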
+
+/* For non-protocol/alg-only op commands */
+#define OP_ALG_TYPE_SHIFT 24
+#define OP_ALG_TYPE_MASK (0x7 << OP_ALG_TYPE_SHIFT)
+#define OP_ALG_TYPE_CLASS1 (0x2 << OP_ALG_TYPE_SHIFT)
+#define OP_ALG_TYPE_CLASS2 (0x4 << OP_ALG_TYPE_SHIFT)
+
+#define OP_ALG_ALGSEL_SHIFT 16
+#define OP_ALG_ALGSEL_MASK (0xff << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SUBMASK (0x0f << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_AES (0x10 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_DES (0x20 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_3DES (0x21 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_ARC4 (0x30 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_MD5 (0x40 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA1 (0x41 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA224 (0x42 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA256 (0x43 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA384 (0x44 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SHA512 (0x45 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_RNG (0x50 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SNOW_F8 (0x60 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_KASUMI (0x70 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_CRC (0x90 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_SNOW_F9 (0xA0 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_ZUCE (0xB0 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_ZUCA (0xC0 << OP_ALG_ALGSEL_SHIFT)
+
+#define OP_ALG_AAI_SHIFT 4
+#define OP_ALG_AAI_MASK (0x3ff << OP_ALG_AAI_SHIFT)
+
+/* block cipher AAI set */
+#define OP_ALG_AESA_MODE_MASK (0xF0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR (0x00 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD128 (0x00 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD8 (0x01 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD16 (0x02 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD24 (0x03 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD32 (0x04 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD40 (0x05 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD48 (0x06 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD56 (0x07 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD64 (0x08 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD72 (0x09 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD80 (0x0a << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD88 (0x0b << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD96 (0x0c << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD104 (0x0d << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD112 (0x0e << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_MOD120 (0x0f << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CBC (0x10 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_ECB (0x20 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CFB (0x30 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_OFB (0x40 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_XTS (0x50 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CMAC (0x60 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_XCBC_MAC (0x70 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CCM (0x80 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_GCM (0x90 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CBC_XCBCMAC (0xa0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_XCBCMAC (0xb0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CBC_CMAC (0xc0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_CMAC_LTE (0xd0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CTR_CMAC (0xe0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CHECKODD (0x80 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_DK (0x100 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_C2K (0x200 << OP_ALG_AAI_SHIFT)
+
+/* randomizer AAI set */
+#define OP_ALG_RNG_MODE_MASK (0x30 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG (0x00 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG_NZB (0x10 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG_OBP (0x20 << OP_ALG_AAI_SHIFT)
+
+/* RNG4 AAI set */
+#define OP_ALG_AAI_RNG4_SH_SHIFT OP_ALG_AAI_SHIFT
+#define OP_ALG_AAI_RNG4_SH_MASK (0x03 << OP_ALG_AAI_RNG4_SH_SHIFT)
+#define OP_ALG_AAI_RNG4_SH_0 (0x00 << OP_ALG_AAI_RNG4_SH_SHIFT)
+#define OP_ALG_AAI_RNG4_SH_1 (0x01 << OP_ALG_AAI_RNG4_SH_SHIFT)
+#define OP_ALG_AAI_RNG4_PS (0x40 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG4_AI (0x80 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG4_SK (0x100 << OP_ALG_AAI_SHIFT)
+
+/* hmac/smac AAI set */
+#define OP_ALG_AAI_HASH (0x00 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_HMAC (0x01 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_SMAC (0x02 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_HMAC_PRECOMP (0x04 << OP_ALG_AAI_SHIFT)
+
+/* CRC AAI set */
+#define OP_ALG_CRC_POLY_MASK (0x07 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_802 (0x01 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_3385 (0x02 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_CUST_POLY (0x04 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_DIS (0x10 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_DOS (0x20 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_DOC (0x40 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_IVZ (0x80 << OP_ALG_AAI_SHIFT)
+
+/* Kasumi/SNOW/ZUC AAI set */
+#define OP_ALG_AAI_F8 (0xc0 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_F9 (0xc8 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_GSM (0x10 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_EDGE (0x20 << OP_ALG_AAI_SHIFT)
+
+#define OP_ALG_AS_SHIFT 2
+#define OP_ALG_AS_MASK (0x3 << OP_ALG_AS_SHIFT)
+#define OP_ALG_AS_UPDATE (0 << OP_ALG_AS_SHIFT)
+#define OP_ALG_AS_INIT (1 << OP_ALG_AS_SHIFT)
+#define OP_ALG_AS_FINALIZE (2 << OP_ALG_AS_SHIFT)
+#define OP_ALG_AS_INITFINAL (3 << OP_ALG_AS_SHIFT)
+
+#define OP_ALG_ICV_SHIFT 1
+#define OP_ALG_ICV_MASK (1 << OP_ALG_ICV_SHIFT)
+#define OP_ALG_ICV_OFF 0
+#define OP_ALG_ICV_ON BIT(1)
+
+#define OP_ALG_DIR_SHIFT 0
+#define OP_ALG_DIR_MASK 1
+#define OP_ALG_DECRYPT 0
+#define OP_ALG_ENCRYPT BIT(0)
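+
+/*
+ * Illustrative sketch (not from the original header): an algorithm-only
+ * (non-protocol) OPERATION word for class-1 AES-CBC encryption over a
+ * single init/finalize pass:
+ *
+ *	uint32_t op = OP_ALG_TYPE_CLASS1 | OP_ALG_ALGSEL_AES |
+ *		      OP_ALG_AAI_CBC | OP_ALG_AS_INITFINAL |
+ *		      OP_ALG_ENCRYPT;
+ */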
+
+/* PKHA algorithm type set */
+#define OP_ALG_PK 0x00800000
+#define OP_ALG_PK_FUN_MASK 0x3f /* clrmem, modmath, or cpymem */
+
+/* PKHA mode clear memory functions */
+#define OP_ALG_PKMODE_A_RAM BIT(19)
+#define OP_ALG_PKMODE_B_RAM BIT(18)
+#define OP_ALG_PKMODE_E_RAM BIT(17)
+#define OP_ALG_PKMODE_N_RAM BIT(16)
+#define OP_ALG_PKMODE_CLEARMEM BIT(0)
+
+/* PKHA mode clear memory function combinations */
+#define OP_ALG_PKMODE_CLEARMEM_ALL (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_A_RAM | \
+ OP_ALG_PKMODE_B_RAM | \
+ OP_ALG_PKMODE_N_RAM | \
+ OP_ALG_PKMODE_E_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_ABE (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_A_RAM | \
+ OP_ALG_PKMODE_B_RAM | \
+ OP_ALG_PKMODE_E_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_ABN (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_A_RAM | \
+ OP_ALG_PKMODE_B_RAM | \
+ OP_ALG_PKMODE_N_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_AB (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_A_RAM | \
+ OP_ALG_PKMODE_B_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_AEN (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_A_RAM | \
+ OP_ALG_PKMODE_E_RAM | \
+ OP_ALG_PKMODE_N_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_AE (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_A_RAM | \
+ OP_ALG_PKMODE_E_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_AN (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_A_RAM | \
+ OP_ALG_PKMODE_N_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_A (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_A_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_BEN (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_B_RAM | \
+ OP_ALG_PKMODE_E_RAM | \
+ OP_ALG_PKMODE_N_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_BE (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_B_RAM | \
+ OP_ALG_PKMODE_E_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_BN (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_B_RAM | \
+ OP_ALG_PKMODE_N_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_B (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_B_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_EN (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_E_RAM | \
+ OP_ALG_PKMODE_N_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_E (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_E_RAM)
+#define OP_ALG_PKMODE_CLEARMEM_N (OP_ALG_PKMODE_CLEARMEM | \
+ OP_ALG_PKMODE_N_RAM)
+
+/* PKHA mode modular-arithmetic functions */
+#define OP_ALG_PKMODE_MOD_IN_MONTY BIT(19)
+#define OP_ALG_PKMODE_MOD_OUT_MONTY BIT(18)
+#define OP_ALG_PKMODE_MOD_F2M BIT(17)
+#define OP_ALG_PKMODE_MOD_R2_IN BIT(16)
+#define OP_ALG_PKMODE_PRJECTV BIT(11)
+#define OP_ALG_PKMODE_TIME_EQ BIT(10)
+
+#define OP_ALG_PKMODE_OUT_B 0x000
+#define OP_ALG_PKMODE_OUT_A 0x100
+
+/*
+ * PKHA mode modular-arithmetic integer functions
+ * Can be ORed with OP_ALG_PKMODE_OUT_A to change destination from B
+ */
+#define OP_ALG_PKMODE_MOD_ADD 0x002
+#define OP_ALG_PKMODE_MOD_SUB_AB 0x003
+#define OP_ALG_PKMODE_MOD_SUB_BA 0x004
+#define OP_ALG_PKMODE_MOD_MULT 0x005
+#define OP_ALG_PKMODE_MOD_MULT_IM (0x005 | OP_ALG_PKMODE_MOD_IN_MONTY)
+#define OP_ALG_PKMODE_MOD_MULT_IM_OM (0x005 | OP_ALG_PKMODE_MOD_IN_MONTY \
+ | OP_ALG_PKMODE_MOD_OUT_MONTY)
+#define OP_ALG_PKMODE_MOD_EXPO 0x006
+#define OP_ALG_PKMODE_MOD_EXPO_TEQ (0x006 | OP_ALG_PKMODE_TIME_EQ)
+#define OP_ALG_PKMODE_MOD_EXPO_IM (0x006 | OP_ALG_PKMODE_MOD_IN_MONTY)
+#define OP_ALG_PKMODE_MOD_EXPO_IM_TEQ (0x006 | OP_ALG_PKMODE_MOD_IN_MONTY \
+ | OP_ALG_PKMODE_TIME_EQ)
+#define OP_ALG_PKMODE_MOD_REDUCT 0x007
+#define OP_ALG_PKMODE_MOD_INV 0x008
+#define OP_ALG_PKMODE_MOD_ECC_ADD 0x009
+#define OP_ALG_PKMODE_MOD_ECC_DBL 0x00a
+#define OP_ALG_PKMODE_MOD_ECC_MULT 0x00b
+#define OP_ALG_PKMODE_MOD_MONT_CNST 0x00c
+#define OP_ALG_PKMODE_MOD_CRT_CNST 0x00d
+#define OP_ALG_PKMODE_MOD_GCD 0x00e
+#define OP_ALG_PKMODE_MOD_PRIMALITY 0x00f
+#define OP_ALG_PKMODE_MOD_SML_EXP 0x016
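+
+/*
+ * Illustrative sketch (not from the original header): per the comment
+ * above, the result of a modular exponentiation can be steered into
+ * PKHA register A instead of the default B:
+ *
+ *	uint32_t pkmode = OP_ALG_PKMODE_MOD_EXPO | OP_ALG_PKMODE_OUT_A;
+ */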
+
+/*
+ * PKHA mode modular-arithmetic F2m functions
+ * Can be ORed with OP_ALG_PKMODE_OUT_A to change destination from B
+ */
+#define OP_ALG_PKMODE_F2M_ADD (0x002 | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_F2M_MUL (0x005 | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_F2M_MUL_IM (0x005 | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_MOD_IN_MONTY)
+#define OP_ALG_PKMODE_F2M_MUL_IM_OM (0x005 | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_MOD_IN_MONTY \
+ | OP_ALG_PKMODE_MOD_OUT_MONTY)
+#define OP_ALG_PKMODE_F2M_EXP (0x006 | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_F2M_EXP_TEQ (0x006 | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_TIME_EQ)
+#define OP_ALG_PKMODE_F2M_AMODN (0x007 | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_F2M_INV (0x008 | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_F2M_R2 (0x00c | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_F2M_GCD (0x00e | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_F2M_SML_EXP (0x016 | OP_ALG_PKMODE_MOD_F2M)
+
+/*
+ * PKHA mode ECC Integer arithmetic functions
+ * Can be ORed with OP_ALG_PKMODE_OUT_A to change destination from B
+ */
+#define OP_ALG_PKMODE_ECC_MOD_ADD 0x009
+#define OP_ALG_PKMODE_ECC_MOD_ADD_IM_OM_PROJ \
+ (0x009 | OP_ALG_PKMODE_MOD_IN_MONTY \
+ | OP_ALG_PKMODE_MOD_OUT_MONTY \
+ | OP_ALG_PKMODE_PRJECTV)
+#define OP_ALG_PKMODE_ECC_MOD_DBL 0x00a
+#define OP_ALG_PKMODE_ECC_MOD_DBL_IM_OM_PROJ \
+ (0x00a | OP_ALG_PKMODE_MOD_IN_MONTY \
+ | OP_ALG_PKMODE_MOD_OUT_MONTY \
+ | OP_ALG_PKMODE_PRJECTV)
+#define OP_ALG_PKMODE_ECC_MOD_MUL 0x00b
+#define OP_ALG_PKMODE_ECC_MOD_MUL_TEQ (0x00b | OP_ALG_PKMODE_TIME_EQ)
+#define OP_ALG_PKMODE_ECC_MOD_MUL_R2 (0x00b | OP_ALG_PKMODE_MOD_R2_IN)
+#define OP_ALG_PKMODE_ECC_MOD_MUL_R2_TEQ \
+ (0x00b | OP_ALG_PKMODE_MOD_R2_IN \
+ | OP_ALG_PKMODE_TIME_EQ)
+#define OP_ALG_PKMODE_ECC_MOD_MUL_R2_PROJ \
+ (0x00b | OP_ALG_PKMODE_MOD_R2_IN \
+ | OP_ALG_PKMODE_PRJECTV)
+#define OP_ALG_PKMODE_ECC_MOD_MUL_R2_PROJ_TEQ \
+ (0x00b | OP_ALG_PKMODE_MOD_R2_IN \
+ | OP_ALG_PKMODE_PRJECTV \
+ | OP_ALG_PKMODE_TIME_EQ)
+
+/*
+ * PKHA mode ECC F2m arithmetic functions
+ * Can be ORed with OP_ALG_PKMODE_OUT_A to change destination from B
+ */
+#define OP_ALG_PKMODE_ECC_F2M_ADD (0x009 | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_ECC_F2M_ADD_IM_OM_PROJ \
+ (0x009 | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_MOD_IN_MONTY \
+ | OP_ALG_PKMODE_MOD_OUT_MONTY \
+ | OP_ALG_PKMODE_PRJECTV)
+#define OP_ALG_PKMODE_ECC_F2M_DBL (0x00a | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_ECC_F2M_DBL_IM_OM_PROJ \
+ (0x00a | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_MOD_IN_MONTY \
+ | OP_ALG_PKMODE_MOD_OUT_MONTY \
+ | OP_ALG_PKMODE_PRJECTV)
+#define OP_ALG_PKMODE_ECC_F2M_MUL (0x00b | OP_ALG_PKMODE_MOD_F2M)
+#define OP_ALG_PKMODE_ECC_F2M_MUL_TEQ \
+ (0x00b | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_TIME_EQ)
+#define OP_ALG_PKMODE_ECC_F2M_MUL_R2 \
+ (0x00b | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_MOD_R2_IN)
+#define OP_ALG_PKMODE_ECC_F2M_MUL_R2_TEQ \
+ (0x00b | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_MOD_R2_IN \
+ | OP_ALG_PKMODE_TIME_EQ)
+#define OP_ALG_PKMODE_ECC_F2M_MUL_R2_PROJ \
+ (0x00b | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_MOD_R2_IN \
+ | OP_ALG_PKMODE_PRJECTV)
+#define OP_ALG_PKMODE_ECC_F2M_MUL_R2_PROJ_TEQ \
+ (0x00b | OP_ALG_PKMODE_MOD_F2M \
+ | OP_ALG_PKMODE_MOD_R2_IN \
+ | OP_ALG_PKMODE_PRJECTV \
+ | OP_ALG_PKMODE_TIME_EQ)
+
+/* PKHA mode copy-memory functions */
+#define OP_ALG_PKMODE_SRC_REG_SHIFT 17
+#define OP_ALG_PKMODE_SRC_REG_MASK (7 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_SHIFT 10
+#define OP_ALG_PKMODE_DST_REG_MASK (7 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_SHIFT 8
+#define OP_ALG_PKMODE_SRC_SEG_MASK (3 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_SHIFT 6
+#define OP_ALG_PKMODE_DST_SEG_MASK (3 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+
+#define OP_ALG_PKMODE_SRC_REG_A (0 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_REG_B (1 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_REG_N (3 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_A (0 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_B (1 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_E (2 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_N (3 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_0 (0 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_1 (1 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_2 (2 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_3 (3 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_0 (0 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_1 (1 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_2 (2 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_3 (3 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+
+/* PKHA mode copy-memory functions - amount based on N SIZE */
+#define OP_ALG_PKMODE_COPY_NSZ 0x10
+#define OP_ALG_PKMODE_COPY_NSZ_A0_B0 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_NSZ_A0_B1 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_NSZ_A0_B2 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_NSZ_A0_B3 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_NSZ_A1_B0 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_NSZ_A1_B1 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_NSZ_A1_B2 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_NSZ_A1_B3 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_NSZ_A2_B0 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_NSZ_A2_B1 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_NSZ_A2_B2 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_NSZ_A2_B3 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_NSZ_A3_B0 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_NSZ_A3_B1 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_NSZ_A3_B2 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_NSZ_A3_B3 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_NSZ_B0_A0 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_NSZ_B0_A1 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_NSZ_B0_A2 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_NSZ_B0_A3 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_NSZ_B1_A0 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_NSZ_B1_A1 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_NSZ_B1_A2 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_NSZ_B1_A3 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_NSZ_B2_A0 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_NSZ_B2_A1 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_NSZ_B2_A2 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_NSZ_B2_A3 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_NSZ_B3_A0 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_NSZ_B3_A1 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_NSZ_B3_A2 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_NSZ_B3_A3 (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_NSZ_A_B (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_NSZ_A_E (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_E)
+#define OP_ALG_PKMODE_COPY_NSZ_A_N (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_N)
+#define OP_ALG_PKMODE_COPY_NSZ_B_A (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_NSZ_B_E (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_E)
+#define OP_ALG_PKMODE_COPY_NSZ_B_N (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_N)
+#define OP_ALG_PKMODE_COPY_NSZ_N_A (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_N | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_NSZ_N_B (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_N | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_NSZ_N_E (OP_ALG_PKMODE_COPY_NSZ | \
+ OP_ALG_PKMODE_SRC_REG_N | \
+ OP_ALG_PKMODE_DST_REG_E)
+
+/* PKHA mode copy-memory functions - amount based on SRC SIZE */
+#define OP_ALG_PKMODE_COPY_SSZ 0x11
+#define OP_ALG_PKMODE_COPY_SSZ_A0_B0 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_SSZ_A0_B1 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_SSZ_A0_B2 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_SSZ_A0_B3 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_SSZ_A1_B0 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_SSZ_A1_B1 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_SSZ_A1_B2 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_SSZ_A1_B3 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_SSZ_A2_B0 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_SSZ_A2_B1 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_SSZ_A2_B2 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_SSZ_A2_B3 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_SSZ_A3_B0 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_SSZ_A3_B1 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_SSZ_A3_B2 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_SSZ_A3_B3 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_B | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_SSZ_B0_A0 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_SSZ_B0_A1 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_SSZ_B0_A2 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_SSZ_B0_A3 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_SSZ_B1_A0 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_SSZ_B1_A1 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_SSZ_B1_A2 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_SSZ_B1_A3 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_1 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_SSZ_B2_A0 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_SSZ_B2_A1 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_SSZ_B2_A2 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_SSZ_B2_A3 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_2 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_SSZ_B3_A0 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_SSZ_B3_A1 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_1)
+#define OP_ALG_PKMODE_COPY_SSZ_B3_A2 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_2)
+#define OP_ALG_PKMODE_COPY_SSZ_B3_A3 (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_SRC_SEG_3 | \
+ OP_ALG_PKMODE_DST_REG_A | \
+ OP_ALG_PKMODE_DST_SEG_3)
+
+#define OP_ALG_PKMODE_COPY_SSZ_A_B (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_SSZ_A_E (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_E)
+#define OP_ALG_PKMODE_COPY_SSZ_A_N (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_A | \
+ OP_ALG_PKMODE_DST_REG_N)
+#define OP_ALG_PKMODE_COPY_SSZ_B_A (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_SSZ_B_E (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_E)
+#define OP_ALG_PKMODE_COPY_SSZ_B_N (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_B | \
+ OP_ALG_PKMODE_DST_REG_N)
+#define OP_ALG_PKMODE_COPY_SSZ_N_A (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_N | \
+ OP_ALG_PKMODE_DST_REG_A)
+#define OP_ALG_PKMODE_COPY_SSZ_N_B (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_N | \
+ OP_ALG_PKMODE_DST_REG_B)
+#define OP_ALG_PKMODE_COPY_SSZ_N_E (OP_ALG_PKMODE_COPY_SSZ | \
+ OP_ALG_PKMODE_SRC_REG_N | \
+ OP_ALG_PKMODE_DST_REG_E)
+
+/*
+ * SEQ_IN_PTR Command Constructs
+ */
+
+/* Release Buffers */
+#define SQIN_RBS BIT(26)
+
+/* Sequence pointer is really a descriptor */
+#define SQIN_INL BIT(25)
+
+/* Sequence pointer is a scatter-gather table */
+#define SQIN_SGF BIT(24)
+
+/* Appends to a previous pointer */
+#define SQIN_PRE BIT(23)
+
+/* Use extended length following pointer */
+#define SQIN_EXT BIT(22)
+
+/* Restore sequence with pointer/length */
+#define SQIN_RTO BIT(21)
+
+/* Replace job descriptor */
+#define SQIN_RJD BIT(20)
+
+/* Sequence Out Pointer - start a new input sequence using output sequence */
+#define SQIN_SOP BIT(19)
+
+#define SQIN_LEN_SHIFT 0
+#define SQIN_LEN_MASK (0xffff << SQIN_LEN_SHIFT)
+
+/*
+ * SEQ_OUT_PTR Command Constructs
+ */
+
+/* Sequence pointer is a scatter-gather table */
+#define SQOUT_SGF BIT(24)
+
+/* Appends to a previous pointer */
+#define SQOUT_PRE BIT(23)
+
+/* Restore sequence with pointer/length */
+#define SQOUT_RTO BIT(21)
+
+/*
+ * Ignore length field, add current output frame length back to SOL register.
+ * Reset tracking length of bytes written to output frame.
+ * Must be used together with SQOUT_RTO.
+ */
+#define SQOUT_RST BIT(20)
+
+/* Allow "write safe" transactions for this Output Sequence */
+#define SQOUT_EWS BIT(19)
+
+/* Use extended length following pointer */
+#define SQOUT_EXT BIT(22)
+
+#define SQOUT_LEN_SHIFT 0
+#define SQOUT_LEN_MASK (0xffff << SQOUT_LEN_SHIFT)
+
+/*
+ * SIGNATURE Command Constructs
+ */
+
+/* TYPE field is all that's relevant */
+#define SIGN_TYPE_SHIFT 16
+#define SIGN_TYPE_MASK (0x0f << SIGN_TYPE_SHIFT)
+
+#define SIGN_TYPE_FINAL (0x00 << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_FINAL_RESTORE (0x01 << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_FINAL_NONZERO (0x02 << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_IMM_2 (0x0a << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_IMM_3 (0x0b << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_IMM_4 (0x0c << SIGN_TYPE_SHIFT)
+
+/*
+ * MOVE Command Constructs
+ */
+
+#define MOVE_AUX_SHIFT 25
+#define MOVE_AUX_MASK (3 << MOVE_AUX_SHIFT)
+#define MOVE_AUX_MS (2 << MOVE_AUX_SHIFT)
+#define MOVE_AUX_LS (1 << MOVE_AUX_SHIFT)
+
+#define MOVE_WAITCOMP_SHIFT 24
+#define MOVE_WAITCOMP_MASK (1 << MOVE_WAITCOMP_SHIFT)
+#define MOVE_WAITCOMP BIT(24)
+
+#define MOVE_SRC_SHIFT 20
+#define MOVE_SRC_MASK (0x0f << MOVE_SRC_SHIFT)
+#define MOVE_SRC_CLASS1CTX (0x00 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_CLASS2CTX (0x01 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_OUTFIFO (0x02 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_DESCBUF (0x03 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH0 (0x04 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH1 (0x05 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH2 (0x06 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH3 (0x07 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_INFIFO (0x08 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_INFIFO_CL (0x09 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_INFIFO_NO_NFIFO (0x0a << MOVE_SRC_SHIFT)
+
+#define MOVE_DEST_SHIFT 16
+#define MOVE_DEST_MASK (0x0f << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS1CTX (0x00 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS2CTX (0x01 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_OUTFIFO (0x02 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_DESCBUF (0x03 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH0 (0x04 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH1 (0x05 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH2 (0x06 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH3 (0x07 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS1INFIFO (0x08 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS2INFIFO (0x09 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_INFIFO (0x0a << MOVE_DEST_SHIFT)
+#define MOVE_DEST_PK_A (0x0c << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS1KEY (0x0d << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS2KEY (0x0e << MOVE_DEST_SHIFT)
+#define MOVE_DEST_ALTSOURCE (0x0f << MOVE_DEST_SHIFT)
+
+#define MOVE_OFFSET_SHIFT 8
+#define MOVE_OFFSET_MASK (0xff << MOVE_OFFSET_SHIFT)
+
+#define MOVE_LEN_SHIFT 0
+#define MOVE_LEN_MASK (0xff << MOVE_LEN_SHIFT)
+
+#define MOVELEN_MRSEL_SHIFT 0
+#define MOVELEN_MRSEL_MASK	(0x3 << MOVELEN_MRSEL_SHIFT)
+#define MOVELEN_MRSEL_MATH0 (0 << MOVELEN_MRSEL_SHIFT)
+#define MOVELEN_MRSEL_MATH1 (1 << MOVELEN_MRSEL_SHIFT)
+#define MOVELEN_MRSEL_MATH2 (2 << MOVELEN_MRSEL_SHIFT)
+#define MOVELEN_MRSEL_MATH3 (3 << MOVELEN_MRSEL_SHIFT)
+
+#define MOVELEN_SIZE_SHIFT 6
+#define MOVELEN_SIZE_MASK (0x3 << MOVELEN_SIZE_SHIFT)
+#define MOVELEN_SIZE_WORD (0x01 << MOVELEN_SIZE_SHIFT)
+#define MOVELEN_SIZE_BYTE (0x02 << MOVELEN_SIZE_SHIFT)
+#define MOVELEN_SIZE_DWORD (0x03 << MOVELEN_SIZE_SHIFT)
+
+/*
+ * MATH Command Constructs
+ */
+
+#define MATH_IFB_SHIFT 26
+#define MATH_IFB_MASK (1 << MATH_IFB_SHIFT)
+#define MATH_IFB BIT(26)
+
+#define MATH_NFU_SHIFT 25
+#define MATH_NFU_MASK (1 << MATH_NFU_SHIFT)
+#define MATH_NFU BIT(25)
+
+/* STL for MATH, SSEL for MATHI */
+#define MATH_STL_SHIFT 24
+#define MATH_STL_MASK (1 << MATH_STL_SHIFT)
+#define MATH_STL BIT(24)
+
+#define MATH_SSEL_SHIFT 24
+#define MATH_SSEL_MASK (1 << MATH_SSEL_SHIFT)
+#define MATH_SSEL BIT(24)
+
+#define MATH_SWP_SHIFT 0
+#define MATH_SWP_MASK (1 << MATH_SWP_SHIFT)
+#define MATH_SWP BIT(0)
+
+/* Function selectors */
+#define MATH_FUN_SHIFT 20
+#define MATH_FUN_MASK (0x0f << MATH_FUN_SHIFT)
+#define MATH_FUN_ADD (0x00 << MATH_FUN_SHIFT)
+#define MATH_FUN_ADDC (0x01 << MATH_FUN_SHIFT)
+#define MATH_FUN_SUB (0x02 << MATH_FUN_SHIFT)
+#define MATH_FUN_SUBB (0x03 << MATH_FUN_SHIFT)
+#define MATH_FUN_OR (0x04 << MATH_FUN_SHIFT)
+#define MATH_FUN_AND (0x05 << MATH_FUN_SHIFT)
+#define MATH_FUN_XOR (0x06 << MATH_FUN_SHIFT)
+#define MATH_FUN_LSHIFT (0x07 << MATH_FUN_SHIFT)
+#define MATH_FUN_RSHIFT (0x08 << MATH_FUN_SHIFT)
+#define MATH_FUN_SHLD (0x09 << MATH_FUN_SHIFT)
+#define MATH_FUN_ZBYT (0x0a << MATH_FUN_SHIFT) /* ZBYT is for MATH */
+#define MATH_FUN_FBYT (0x0a << MATH_FUN_SHIFT) /* FBYT is for MATHI */
+#define MATH_FUN_BSWAP (0x0b << MATH_FUN_SHIFT)
+
+/* Source 0 selectors */
+#define MATH_SRC0_SHIFT 16
+#define MATH_SRC0_MASK (0x0f << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG0 (0x00 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG1 (0x01 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG2 (0x02 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG3 (0x03 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_IMM (0x04 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_DPOVRD (0x07 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_SEQINLEN (0x08 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_SEQOUTLEN (0x09 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_VARSEQINLEN (0x0a << MATH_SRC0_SHIFT)
+#define MATH_SRC0_VARSEQOUTLEN (0x0b << MATH_SRC0_SHIFT)
+#define MATH_SRC0_ZERO (0x0c << MATH_SRC0_SHIFT)
+#define MATH_SRC0_ONE (0x0f << MATH_SRC0_SHIFT)
+
+/* Source 1 selectors */
+#define MATH_SRC1_SHIFT 12
+#define MATHI_SRC1_SHIFT 16
+#define MATH_SRC1_MASK (0x0f << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG0 (0x00 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG1 (0x01 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_DPOVRD (0x07 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_VARSEQINLEN (0x08 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_VARSEQOUTLEN (0x09 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_INFIFO (0x0a << MATH_SRC1_SHIFT)
+#define MATH_SRC1_OUTFIFO (0x0b << MATH_SRC1_SHIFT)
+#define MATH_SRC1_ONE (0x0c << MATH_SRC1_SHIFT)
+#define MATH_SRC1_JOBSOURCE (0x0d << MATH_SRC1_SHIFT)
+#define MATH_SRC1_ZERO (0x0f << MATH_SRC1_SHIFT)
+
+/* Destination selectors */
+#define MATH_DEST_SHIFT 8
+#define MATHI_DEST_SHIFT 12
+#define MATH_DEST_MASK (0x0f << MATH_DEST_SHIFT)
+#define MATH_DEST_REG0 (0x00 << MATH_DEST_SHIFT)
+#define MATH_DEST_REG1 (0x01 << MATH_DEST_SHIFT)
+#define MATH_DEST_REG2 (0x02 << MATH_DEST_SHIFT)
+#define MATH_DEST_REG3 (0x03 << MATH_DEST_SHIFT)
+#define MATH_DEST_DPOVRD (0x07 << MATH_DEST_SHIFT)
+#define MATH_DEST_SEQINLEN (0x08 << MATH_DEST_SHIFT)
+#define MATH_DEST_SEQOUTLEN (0x09 << MATH_DEST_SHIFT)
+#define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT)
+#define MATH_DEST_VARSEQOUTLEN (0x0b << MATH_DEST_SHIFT)
+#define MATH_DEST_NONE (0x0f << MATH_DEST_SHIFT)
+
+/* MATHI Immediate value */
+#define MATHI_IMM_SHIFT 4
+#define MATHI_IMM_MASK (0xff << MATHI_IMM_SHIFT)
+
+/* Length selectors */
+#define MATH_LEN_SHIFT 0
+#define MATH_LEN_MASK (0x0f << MATH_LEN_SHIFT)
+#define MATH_LEN_1BYTE 0x01
+#define MATH_LEN_2BYTE 0x02
+#define MATH_LEN_4BYTE 0x04
+#define MATH_LEN_8BYTE 0x08
+
+/*
+ * JUMP Command Constructs
+ */
+
+#define JUMP_CLASS_SHIFT 25
+#define JUMP_CLASS_MASK (3 << JUMP_CLASS_SHIFT)
+#define JUMP_CLASS_NONE 0
+#define JUMP_CLASS_CLASS1 (1 << JUMP_CLASS_SHIFT)
+#define JUMP_CLASS_CLASS2 (2 << JUMP_CLASS_SHIFT)
+#define JUMP_CLASS_BOTH (3 << JUMP_CLASS_SHIFT)
+
+#define JUMP_JSL_SHIFT 24
+#define JUMP_JSL_MASK (1 << JUMP_JSL_SHIFT)
+#define JUMP_JSL BIT(24)
+
+#define JUMP_TYPE_SHIFT 20
+#define JUMP_TYPE_MASK (0x0f << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_LOCAL (0x00 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_LOCAL_INC (0x01 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_GOSUB (0x02 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_LOCAL_DEC (0x03 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_NONLOCAL (0x04 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_RETURN (0x06 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_HALT (0x08 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_HALT_USER (0x0c << JUMP_TYPE_SHIFT)
+
+#define JUMP_TEST_SHIFT 16
+#define JUMP_TEST_MASK (0x03 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_ALL (0x00 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_INVALL (0x01 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_ANY (0x02 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_INVANY (0x03 << JUMP_TEST_SHIFT)
+
+/* Condition codes. JSL bit is factored in */
+#define JUMP_COND_SHIFT 8
+#define JUMP_COND_MASK ((0xff << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_PK_0 BIT(15)
+#define JUMP_COND_PK_GCD_1 BIT(14)
+#define JUMP_COND_PK_PRIME BIT(13)
+#define JUMP_COND_MATH_N BIT(11)
+#define JUMP_COND_MATH_Z BIT(10)
+#define JUMP_COND_MATH_C BIT(9)
+#define JUMP_COND_MATH_NV BIT(8)
+
+#define JUMP_COND_JQP (BIT(15) | JUMP_JSL)
+#define JUMP_COND_SHRD (BIT(14) | JUMP_JSL)
+#define JUMP_COND_SELF (BIT(13) | JUMP_JSL)
+#define JUMP_COND_CALM (BIT(12) | JUMP_JSL)
+#define JUMP_COND_NIP (BIT(11) | JUMP_JSL)
+#define JUMP_COND_NIFP (BIT(10) | JUMP_JSL)
+#define JUMP_COND_NOP (BIT(9) | JUMP_JSL)
+#define JUMP_COND_NCP (BIT(8) | JUMP_JSL)
+
+/* Source / destination selectors */
+#define JUMP_SRC_DST_SHIFT 12
+#define JUMP_SRC_DST_MASK (0x0f << JUMP_SRC_DST_SHIFT)
+#define JUMP_SRC_DST_MATH0 (0x00 << JUMP_SRC_DST_SHIFT)
+#define JUMP_SRC_DST_MATH1 (0x01 << JUMP_SRC_DST_SHIFT)
+#define JUMP_SRC_DST_MATH2 (0x02 << JUMP_SRC_DST_SHIFT)
+#define JUMP_SRC_DST_MATH3 (0x03 << JUMP_SRC_DST_SHIFT)
+#define JUMP_SRC_DST_DPOVRD (0x07 << JUMP_SRC_DST_SHIFT)
+#define JUMP_SRC_DST_SEQINLEN (0x08 << JUMP_SRC_DST_SHIFT)
+#define JUMP_SRC_DST_SEQOUTLEN (0x09 << JUMP_SRC_DST_SHIFT)
+#define JUMP_SRC_DST_VARSEQINLEN (0x0a << JUMP_SRC_DST_SHIFT)
+#define JUMP_SRC_DST_VARSEQOUTLEN (0x0b << JUMP_SRC_DST_SHIFT)
+
+#define JUMP_OFFSET_SHIFT 0
+#define JUMP_OFFSET_MASK (0xff << JUMP_OFFSET_SHIFT)
+
+/*
+ * NFIFO ENTRY
+ * Data Constructs
+ *
+ */
+#define NFIFOENTRY_DEST_SHIFT 30
+#define NFIFOENTRY_DEST_MASK ((uint32_t)(3 << NFIFOENTRY_DEST_SHIFT))
+#define NFIFOENTRY_DEST_DECO (0 << NFIFOENTRY_DEST_SHIFT)
+#define NFIFOENTRY_DEST_CLASS1 (1 << NFIFOENTRY_DEST_SHIFT)
+#define NFIFOENTRY_DEST_CLASS2 ((uint32_t)(2 << NFIFOENTRY_DEST_SHIFT))
+#define NFIFOENTRY_DEST_BOTH ((uint32_t)(3 << NFIFOENTRY_DEST_SHIFT))
+
+#define NFIFOENTRY_LC2_SHIFT 29
+#define NFIFOENTRY_LC2_MASK (1 << NFIFOENTRY_LC2_SHIFT)
+#define NFIFOENTRY_LC2 BIT(29)
+
+#define NFIFOENTRY_LC1_SHIFT 28
+#define NFIFOENTRY_LC1_MASK (1 << NFIFOENTRY_LC1_SHIFT)
+#define NFIFOENTRY_LC1 BIT(28)
+
+#define NFIFOENTRY_FC2_SHIFT 27
+#define NFIFOENTRY_FC2_MASK (1 << NFIFOENTRY_FC2_SHIFT)
+#define NFIFOENTRY_FC2 BIT(27)
+
+#define NFIFOENTRY_FC1_SHIFT 26
+#define NFIFOENTRY_FC1_MASK (1 << NFIFOENTRY_FC1_SHIFT)
+#define NFIFOENTRY_FC1 BIT(26)
+
+#define NFIFOENTRY_STYPE_SHIFT 24
+#define NFIFOENTRY_STYPE_MASK (3 << NFIFOENTRY_STYPE_SHIFT)
+#define NFIFOENTRY_STYPE_DFIFO (0 << NFIFOENTRY_STYPE_SHIFT)
+#define NFIFOENTRY_STYPE_OFIFO (1 << NFIFOENTRY_STYPE_SHIFT)
+#define NFIFOENTRY_STYPE_PAD (2 << NFIFOENTRY_STYPE_SHIFT)
+#define NFIFOENTRY_STYPE_SNOOP (3 << NFIFOENTRY_STYPE_SHIFT)
+#define NFIFOENTRY_STYPE_ALTSOURCE ((0 << NFIFOENTRY_STYPE_SHIFT) \
+ | NFIFOENTRY_AST)
+#define NFIFOENTRY_STYPE_OFIFO_SYNC ((1 << NFIFOENTRY_STYPE_SHIFT) \
+ | NFIFOENTRY_AST)
+#define NFIFOENTRY_STYPE_SNOOP_ALT ((3 << NFIFOENTRY_STYPE_SHIFT) \
+ | NFIFOENTRY_AST)
+
+#define NFIFOENTRY_DTYPE_SHIFT 20
+#define NFIFOENTRY_DTYPE_MASK (0xF << NFIFOENTRY_DTYPE_SHIFT)
+
+#define NFIFOENTRY_DTYPE_SBOX (0x0 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_AAD (0x1 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_IV (0x2 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_SAD (0x3 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_ICV (0xA << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_SKIP (0xE << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_MSG (0xF << NFIFOENTRY_DTYPE_SHIFT)
+
+#define NFIFOENTRY_DTYPE_PK_A0 (0x0 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A1 (0x1 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A2 (0x2 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A3 (0x3 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B0 (0x4 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B1 (0x5 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B2 (0x6 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B3 (0x7 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_N (0x8 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_E (0x9 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A (0xC << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B (0xD << NFIFOENTRY_DTYPE_SHIFT)
+
+#define NFIFOENTRY_BND_SHIFT 19
+#define NFIFOENTRY_BND_MASK (1 << NFIFOENTRY_BND_SHIFT)
+#define NFIFOENTRY_BND BIT(19)
+
+#define NFIFOENTRY_PTYPE_SHIFT 16
+#define NFIFOENTRY_PTYPE_MASK (0x7 << NFIFOENTRY_PTYPE_SHIFT)
+
+#define NFIFOENTRY_PTYPE_ZEROS (0x0 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND_NOZEROS (0x1 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_INCREMENT (0x2 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND (0x3 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_ZEROS_NZ (0x4 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND_NZ_LZ (0x5 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_N (0x6 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND_NZ_N (0x7 << NFIFOENTRY_PTYPE_SHIFT)
+
+#define NFIFOENTRY_OC_SHIFT 15
+#define NFIFOENTRY_OC_MASK (1 << NFIFOENTRY_OC_SHIFT)
+#define NFIFOENTRY_OC BIT(15)
+
+#define NFIFOENTRY_PR_SHIFT 15
+#define NFIFOENTRY_PR_MASK (1 << NFIFOENTRY_PR_SHIFT)
+#define NFIFOENTRY_PR BIT(15)
+
+#define NFIFOENTRY_AST_SHIFT 14
+#define NFIFOENTRY_AST_MASK (1 << NFIFOENTRY_AST_SHIFT)
+#define NFIFOENTRY_AST BIT(14)
+
+#define NFIFOENTRY_BM_SHIFT 11
+#define NFIFOENTRY_BM_MASK (1 << NFIFOENTRY_BM_SHIFT)
+#define NFIFOENTRY_BM BIT(11)
+
+#define NFIFOENTRY_PS_SHIFT 10
+#define NFIFOENTRY_PS_MASK (1 << NFIFOENTRY_PS_SHIFT)
+#define NFIFOENTRY_PS BIT(10)
+
+#define NFIFOENTRY_DLEN_SHIFT 0
+#define NFIFOENTRY_DLEN_MASK (0xFFF << NFIFOENTRY_DLEN_SHIFT)
+
+#define NFIFOENTRY_PLEN_SHIFT 0
+#define NFIFOENTRY_PLEN_MASK (0xFF << NFIFOENTRY_PLEN_SHIFT)
+
+/* Append Load Immediate Command */
+#define FD_CMD_APPEND_LOAD_IMMEDIATE BIT(31)
+
+/* Set SEQ LIODN equal to the Non-SEQ LIODN for the job */
+#define FD_CMD_SET_SEQ_LIODN_EQUAL_NONSEQ_LIODN BIT(30)
+
+/* Frame Descriptor Command for Replacement Job Descriptor */
+#define FD_CMD_REPLACE_JOB_DESC BIT(29)
+
+#endif /* __RTA_DESC_H__ */
diff --git a/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/desc/algo.h b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/desc/algo.h
new file mode 100644
index 00000000..c71ada07
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/desc/algo.h
@@ -0,0 +1,465 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DESC_ALGO_H__
+#define __DESC_ALGO_H__
+
+#include "hw/rta.h"
+#include "common.h"
+
+/**
+ * DOC: Algorithms - Shared Descriptor Constructors
+ *
+ * Shared descriptors for algorithms (i.e. not for protocols).
+ */
+
+/**
+ * cnstr_shdsc_snow_f8 - SNOW/f8 (UEA2) as a shared descriptor
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @cipherdata: pointer to block cipher transform definitions
+ * @dir: Cipher direction (DIR_ENC/DIR_DEC)
+ * @count: UEA2 count value (32 bits)
+ * @bearer: UEA2 bearer ID (5 bits)
+ * @direction: UEA2 direction (1 bit)
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_snow_f8(uint32_t *descbuf, bool ps, bool swap,
+ struct alginfo *cipherdata, uint8_t dir,
+ uint32_t count, uint8_t bearer, uint8_t direction)
+{
+ struct program prg;
+ struct program *p = &prg;
+ uint32_t ct = count;
+ uint8_t br = bearer;
+ uint8_t dr = direction;
+ uint32_t context[2] = {ct, (br << 27) | (dr << 26)};
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap) {
+ PROGRAM_SET_BSWAP(p);
+
+ context[0] = swab32(context[0]);
+ context[1] = swab32(context[1]);
+ }
+
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ SHR_HDR(p, SHR_ALWAYS, 1, 0);
+
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQINSZ, 4, 0);
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQOUTSZ, 4, 0);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F8, OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL, 0, dir);
+ LOAD(p, (uintptr_t)context, CONTEXT1, 0, 8, IMMED | COPY);
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * cnstr_shdsc_snow_f9 - SNOW/f9 (UIA2) as a shared descriptor
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @authdata: pointer to authentication transform definitions
+ * @dir: cipher direction (DIR_ENC/DIR_DEC)
+ * @count: UIA2 count value (32 bits)
+ * @fresh: UIA2 fresh value ID (32 bits)
+ * @direction: UIA2 direction (1 bit)
+ * @datalen: size of data
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_snow_f9(uint32_t *descbuf, bool ps, bool swap,
+ struct alginfo *authdata, uint8_t dir, uint32_t count,
+ uint32_t fresh, uint8_t direction, uint32_t datalen)
+{
+ struct program prg;
+ struct program *p = &prg;
+ uint64_t ct = count;
+ uint64_t fr = fresh;
+ uint64_t dr = direction;
+ uint64_t context[2];
+
+ context[0] = (ct << 32) | (dr << 26);
+ context[1] = fr << 32;
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap) {
+ PROGRAM_SET_BSWAP(p);
+
+ context[0] = swab64(context[0]);
+ context[1] = swab64(context[1]);
+ }
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ SHR_HDR(p, SHR_ALWAYS, 1, 0);
+
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQINSZ, 4, 0);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F9, OP_ALG_AAI_F9,
+ OP_ALG_AS_INITFINAL, 0, dir);
+ LOAD(p, (uintptr_t)context, CONTEXT2, 0, 16, IMMED | COPY);
+ SEQFIFOLOAD(p, BIT_DATA, datalen, CLASS2 | LAST2);
+ /* Save lower half of MAC out into a 32-bit sequence */
+ SEQSTORE(p, CONTEXT2, 0, 4, 0);
+
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * cnstr_shdsc_blkcipher - block cipher transformation
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @cipherdata: pointer to block cipher transform definitions
+ * @iv: IV data; if NULL, "ivlen" bytes from the input frame will be read as IV
+ * @ivlen: IV length
+ * @dir: DIR_ENC/DIR_DEC
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_blkcipher(uint32_t *descbuf, bool ps, bool swap,
+ struct alginfo *cipherdata, uint8_t *iv,
+ uint32_t ivlen, uint8_t dir)
+{
+ struct program prg;
+ struct program *p = &prg;
+ const bool is_aes_dec = (dir == DIR_DEC) &&
+ (cipherdata->algtype == OP_ALG_ALGSEL_AES);
+ LABEL(keyjmp);
+ LABEL(skipdk);
+ REFERENCE(pkeyjmp);
+ REFERENCE(pskipdk);
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ SHR_HDR(p, SHR_SERIAL, 1, SC);
+
+ pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, SHRD);
+ /* Insert Key */
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+
+ if (is_aes_dec) {
+ ALG_OPERATION(p, cipherdata->algtype, cipherdata->algmode,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, dir);
+
+ pskipdk = JUMP(p, skipdk, LOCAL_JUMP, ALL_TRUE, 0);
+ }
+ SET_LABEL(p, keyjmp);
+
+ if (is_aes_dec) {
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES, cipherdata->algmode |
+ OP_ALG_AAI_DK, OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE, dir);
+ SET_LABEL(p, skipdk);
+ } else {
+ ALG_OPERATION(p, cipherdata->algtype, cipherdata->algmode,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, dir);
+ }
+
+ if (iv)
+ /* IV load, convert size */
+ LOAD(p, (uintptr_t)iv, CONTEXT1, 0, ivlen, IMMED | COPY);
+ else
+		/* The IV precedes the actual message in the input frame */
+ SEQLOAD(p, CONTEXT1, 0, ivlen, 0);
+
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQINSZ, 4, 0);
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQOUTSZ, 4, 0);
+
+ /* Insert sequence load/store with VLF */
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ PATCH_JUMP(p, pkeyjmp, keyjmp);
+ if (is_aes_dec)
+ PATCH_JUMP(p, pskipdk, skipdk);
+
+ return PROGRAM_FINALIZE(p);
+}
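+
+/*
+ * Usage sketch for cnstr_shdsc_blkcipher() (illustrative only; the AES-CBC
+ * selection, the 16-byte key length and the name "key_iova" are assumptions
+ * made for this example, and swap = true assumes core and SEC endianness
+ * differ):
+ *
+ *     uint32_t shdesc[64];
+ *     struct alginfo cipher = {
+ *             .algtype = OP_ALG_ALGSEL_AES,
+ *             .algmode = OP_ALG_AAI_CBC,
+ *             .key = key_iova,
+ *             .keylen = 16,
+ *             .key_enc_flags = 0,
+ *             .key_type = RTA_DATA_IMM,
+ *     };
+ *
+ *     Passing iv = NULL makes the descriptor read ivlen bytes of IV from the
+ *     start of each input frame rather than embedding the IV here:
+ *
+ *     int desc_words = cnstr_shdsc_blkcipher(shdesc, false, true, &cipher,
+ *                                            NULL, 16, DIR_ENC);
+ */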
+
+/**
+ * cnstr_shdsc_hmac - HMAC shared
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @authdata: pointer to authentication transform definitions;
+ *            valid message digest algorithms: OP_ALG_ALGSEL_MD5, SHA1,
+ *            SHA224, SHA256, SHA384 or SHA512.
+ * @do_icv: 0 if ICV checking is not desired, any other value if ICV checking
+ * is needed for all the packets processed by this shared descriptor
+ * @trunc_len: Length of the truncated ICV to be written in the output buffer, 0
+ * if no truncation is needed
+ *
+ * Note: keys longer than the block size of the underlying hash function
+ * (which depends on the selected algorithm) are not supported.
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_hmac(uint32_t *descbuf, bool ps, bool swap,
+ struct alginfo *authdata, uint8_t do_icv,
+ uint8_t trunc_len)
+{
+ struct program prg;
+ struct program *p = &prg;
+ uint8_t storelen, opicv, dir;
+ LABEL(keyjmp);
+ LABEL(jmpprecomp);
+ REFERENCE(pkeyjmp);
+ REFERENCE(pjmpprecomp);
+
+ /* Compute fixed-size store based on alg selection */
+ switch (authdata->algtype) {
+ case OP_ALG_ALGSEL_MD5:
+ storelen = 16;
+ break;
+ case OP_ALG_ALGSEL_SHA1:
+ storelen = 20;
+ break;
+ case OP_ALG_ALGSEL_SHA224:
+ storelen = 28;
+ break;
+ case OP_ALG_ALGSEL_SHA256:
+ storelen = 32;
+ break;
+ case OP_ALG_ALGSEL_SHA384:
+ storelen = 48;
+ break;
+ case OP_ALG_ALGSEL_SHA512:
+ storelen = 64;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ trunc_len = trunc_len && (trunc_len < storelen) ? trunc_len : storelen;
+
+ opicv = do_icv ? ICV_CHECK_ENABLE : ICV_CHECK_DISABLE;
+ dir = do_icv ? DIR_DEC : DIR_ENC;
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ SHR_HDR(p, SHR_SERIAL, 1, SC);
+
+ pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, SHRD);
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+
+ /* Do operation */
+ ALG_OPERATION(p, authdata->algtype, OP_ALG_AAI_HMAC,
+ OP_ALG_AS_INITFINAL, opicv, dir);
+
+ pjmpprecomp = JUMP(p, jmpprecomp, LOCAL_JUMP, ALL_TRUE, 0);
+ SET_LABEL(p, keyjmp);
+
+ ALG_OPERATION(p, authdata->algtype, OP_ALG_AAI_HMAC_PRECOMP,
+ OP_ALG_AS_INITFINAL, opicv, dir);
+
+ SET_LABEL(p, jmpprecomp);
+
+ /* compute sequences */
+ if (opicv == ICV_CHECK_ENABLE)
+ MATHB(p, SEQINSZ, SUB, trunc_len, VSEQINSZ, 4, IMMED2);
+ else
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQINSZ, 4, 0);
+
+ /* Do load (variable length) */
+ SEQFIFOLOAD(p, MSG2, 0, VLF | LAST2);
+
+ if (opicv == ICV_CHECK_ENABLE)
+ SEQFIFOLOAD(p, ICV2, trunc_len, LAST2);
+ else
+ SEQSTORE(p, CONTEXT2, 0, trunc_len, 0);
+
+ PATCH_JUMP(p, pkeyjmp, keyjmp);
+ PATCH_JUMP(p, pjmpprecomp, jmpprecomp);
+
+ return PROGRAM_FINALIZE(p);
+}
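+
+/*
+ * Usage sketch for cnstr_shdsc_hmac() (illustrative only; SHA-256, the
+ * 32-byte key, the 12-byte truncated ICV and the name "key_iova" are
+ * assumptions made for this example):
+ *
+ *     struct alginfo auth = {
+ *             .algtype = OP_ALG_ALGSEL_SHA256,
+ *             .key = key_iova,
+ *             .keylen = 32,
+ *             .key_enc_flags = 0,
+ *             .key_type = RTA_DATA_IMM,
+ *     };
+ *
+ *     do_icv = 0 generates the (truncated) HMAC; a non-zero do_icv would
+ *     instead verify an ICV appended to the input:
+ *
+ *     int desc_words = cnstr_shdsc_hmac(shdesc, false, true, &auth, 0, 12);
+ */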
+
+/**
+ * cnstr_shdsc_kasumi_f8 - KASUMI F8 (Confidentiality) as a shared descriptor
+ * (ETSI "Document 1: f8 and f9 specification")
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @cipherdata: pointer to block cipher transform definitions
+ * @dir: cipher direction (DIR_ENC/DIR_DEC)
+ * @count: count value (32 bits)
+ * @bearer: bearer ID (5 bits)
+ * @direction: direction (1 bit)
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_kasumi_f8(uint32_t *descbuf, bool ps, bool swap,
+ struct alginfo *cipherdata, uint8_t dir,
+ uint32_t count, uint8_t bearer, uint8_t direction)
+{
+ struct program prg;
+ struct program *p = &prg;
+ uint64_t ct = count;
+ uint64_t br = bearer;
+ uint64_t dr = direction;
+ uint32_t context[2] = { ct, (br << 27) | (dr << 26) };
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap) {
+ PROGRAM_SET_BSWAP(p);
+
+ context[0] = swab32(context[0]);
+ context[1] = swab32(context[1]);
+ }
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ SHR_HDR(p, SHR_ALWAYS, 1, 0);
+
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQINSZ, 4, 0);
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQOUTSZ, 4, 0);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_KASUMI, OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL, 0, dir);
+ LOAD(p, (uintptr_t)context, CONTEXT1, 0, 8, IMMED | COPY);
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * cnstr_shdsc_kasumi_f9 - KASUMI F9 (Integrity) as a shared descriptor
+ * (ETSI "Document 1: f8 and f9 specification")
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @authdata: pointer to authentication transform definitions
+ * @dir: cipher direction (DIR_ENC/DIR_DEC)
+ * @count: count value (32 bits)
+ * @fresh: fresh value ID (32 bits)
+ * @direction: direction (1 bit)
+ * @datalen: size of data
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_kasumi_f9(uint32_t *descbuf, bool ps, bool swap,
+ struct alginfo *authdata, uint8_t dir,
+ uint32_t count, uint32_t fresh, uint8_t direction,
+ uint32_t datalen)
+{
+ struct program prg;
+ struct program *p = &prg;
+ uint16_t ctx_offset = 16;
+ uint32_t context[6] = {count, direction << 26, fresh, 0, 0, 0};
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap) {
+ PROGRAM_SET_BSWAP(p);
+
+ context[0] = swab32(context[0]);
+ context[1] = swab32(context[1]);
+ context[2] = swab32(context[2]);
+ }
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ SHR_HDR(p, SHR_ALWAYS, 1, 0);
+
+ KEY(p, KEY1, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQINSZ, 4, 0);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_KASUMI, OP_ALG_AAI_F9,
+ OP_ALG_AS_INITFINAL, 0, dir);
+ LOAD(p, (uintptr_t)context, CONTEXT1, 0, 24, IMMED | COPY);
+ SEQFIFOLOAD(p, BIT_DATA, datalen, CLASS1 | LAST1);
+ /* Save output MAC of DWORD 2 into a 32-bit sequence */
+ SEQSTORE(p, CONTEXT1, ctx_offset, 4, 0);
+
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * cnstr_shdsc_crc - CRC32 Accelerator (IEEE 802 CRC32 protocol mode)
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_crc(uint32_t *descbuf, bool swap)
+{
+ struct program prg;
+ struct program *p = &prg;
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+
+ SHR_HDR(p, SHR_ALWAYS, 1, 0);
+
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQINSZ, 4, 0);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_CRC,
+ OP_ALG_AAI_802 | OP_ALG_AAI_DOC,
+ OP_ALG_AS_FINALIZE, 0, DIR_ENC);
+ SEQFIFOLOAD(p, MSG2, 0, VLF | LAST2);
+ SEQSTORE(p, CONTEXT2, 0, 4, 0);
+
+ return PROGRAM_FINALIZE(p);
+}
+
+#endif /* __DESC_ALGO_H__ */
diff --git a/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/desc/common.h b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/desc/common.h
new file mode 100644
index 00000000..6b254908
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/desc/common.h
@@ -0,0 +1,131 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DESC_COMMON_H__
+#define __DESC_COMMON_H__
+
+#include "hw/rta.h"
+
+/**
+ * DOC: Shared Descriptor Constructors - shared structures
+ *
+ * Data structures shared between algorithm and protocol implementations.
+ */
+
+/**
+ * struct alginfo - Container for algorithm details
+ * @algtype: algorithm selector; for valid values, see documentation of the
+ * functions where it is used.
+ * @keylen: length of the provided algorithm key, in bytes
+ * @key: address where algorithm key resides; virtual address if key_type is
+ * RTA_DATA_IMM, physical (bus) address if key_type is RTA_DATA_PTR or
+ * RTA_DATA_IMM_DMA.
+ * @key_enc_flags: key encryption flags; see encrypt_flags parameter of KEY
+ * command for valid values.
+ * @key_type: enum rta_data_type
+ * @algmode: algorithm mode selector; for valid values, see documentation of the
+ * functions where it is used.
+ */
+struct alginfo {
+ uint32_t algtype;
+ uint32_t keylen;
+ uint64_t key;
+ uint32_t key_enc_flags;
+ enum rta_data_type key_type;
+ uint16_t algmode;
+};
+
+#define INLINE_KEY(alginfo) inline_flags(alginfo->key_type)
+
+/**
+ * rta_inline_query() - Provide indications on which data items can be inlined
+ * and which shall be referenced in a shared descriptor.
+ * @sd_base_len: Shared descriptor base length - bytes consumed by the commands,
+ * excluding the data items to be inlined (or corresponding
+ * pointer if an item is not inlined). Each cnstr_* function that
+ * generates descriptors should have a define mentioning
+ * corresponding length.
+ * @jd_len: Maximum length of the job descriptor(s) that will be used
+ * together with the shared descriptor.
+ * @data_len: Array of lengths of the data items trying to be inlined
+ * @inl_mask: 32bit mask with bit x = 1 if data item x can be inlined, 0
+ * otherwise.
+ * @count: Number of data items (size of @data_len array); must be <= 32
+ *
+ * Return: 0 if data can be inlined / referenced, negative value if not. If 0,
+ * check @inl_mask for details.
+ */
+static inline int
+rta_inline_query(unsigned int sd_base_len,
+ unsigned int jd_len,
+ unsigned int *data_len,
+ uint32_t *inl_mask,
+ unsigned int count)
+{
+ int rem_bytes = (int)(CAAM_DESC_BYTES_MAX - sd_base_len - jd_len);
+ unsigned int i;
+
+ *inl_mask = 0;
+ for (i = 0; (i < count) && (rem_bytes > 0); i++) {
+ if (rem_bytes - (int)(data_len[i] +
+ (count - i - 1) * CAAM_PTR_SZ) >= 0) {
+ rem_bytes -= data_len[i];
+ *inl_mask |= (1 << i);
+ } else {
+ rem_bytes -= CAAM_PTR_SZ;
+ }
+ }
+
+ return (rem_bytes >= 0) ? 0 : -1;
+}
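+
+/*
+ * Usage sketch for rta_inline_query() (illustrative only; "sd_base_len" and
+ * "jd_len" stand for values documented by the individual cnstr_* functions
+ * and are not defined here):
+ *
+ *     unsigned int data_len[1] = { authdata.keylen };
+ *     uint32_t inl_mask;
+ *
+ *     if (rta_inline_query(sd_base_len, jd_len, data_len, &inl_mask, 1) == 0)
+ *             authdata.key_type = (inl_mask & 1) ? RTA_DATA_IMM :
+ *                                                  RTA_DATA_PTR;
+ */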
+
+/**
+ * struct protcmd - Container for Protocol Operation Command fields
+ * @optype: command type
+ * @protid: protocol identifier
+ * @protinfo: protocol information
+ */
+struct protcmd {
+ uint32_t optype;
+ uint32_t protid;
+ uint16_t protinfo;
+};
+
+#endif /* __DESC_COMMON_H__ */
diff --git a/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/desc/ipsec.h b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/desc/ipsec.h
new file mode 100644
index 00000000..c63d0dac
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/desc/ipsec.h
@@ -0,0 +1,1547 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DESC_IPSEC_H__
+#define __DESC_IPSEC_H__
+
+#include "hw/rta.h"
+#include "common.h"
+
+/**
+ * DOC: IPsec Shared Descriptor Constructors
+ *
+ * Shared descriptors for IPsec protocol.
+ */
+
+/* General IPSec ESP encap / decap PDB options */
+
+/**
+ * PDBOPTS_ESP_ESN - Extended sequence included
+ */
+#define PDBOPTS_ESP_ESN 0x10
+
+/**
+ * PDBOPTS_ESP_IPVSN - Process IPv6 header
+ *
+ * Valid only for IPsec legacy mode.
+ */
+#define PDBOPTS_ESP_IPVSN 0x02
+
+/**
+ * PDBOPTS_ESP_TUNNEL - Tunnel mode next-header byte
+ *
+ * Valid only for IPsec legacy mode.
+ */
+#define PDBOPTS_ESP_TUNNEL 0x01
+
+/* IPSec ESP Encap PDB options */
+
+/**
+ * PDBOPTS_ESP_UPDATE_CSUM - Update ip header checksum
+ *
+ * Valid only for IPsec legacy mode.
+ */
+#define PDBOPTS_ESP_UPDATE_CSUM 0x80
+
+/**
+ * PDBOPTS_ESP_DIFFSERV - Copy TOS/TC from inner iphdr
+ *
+ * Valid only for IPsec legacy mode.
+ */
+#define PDBOPTS_ESP_DIFFSERV 0x40
+
+/**
+ * PDBOPTS_ESP_IVSRC - IV comes from internal random gen
+ */
+#define PDBOPTS_ESP_IVSRC 0x20
+
+/**
+ * PDBOPTS_ESP_IPHDRSRC - IP header comes from PDB
+ *
+ * Valid only for IPsec legacy mode.
+ */
+#define PDBOPTS_ESP_IPHDRSRC 0x08
+
+/**
+ * PDBOPTS_ESP_INCIPHDR - Prepend IP header to output frame
+ *
+ * Valid only for IPsec legacy mode.
+ */
+#define PDBOPTS_ESP_INCIPHDR 0x04
+
+/**
+ * PDBOPTS_ESP_OIHI_MASK - Mask for Outer IP Header Included
+ *
+ * Valid only for IPsec new mode.
+ */
+#define PDBOPTS_ESP_OIHI_MASK 0x0c
+
+/**
+ * PDBOPTS_ESP_OIHI_PDB_INL - Prepend IP header to output frame from PDB (where
+ * it is inlined).
+ *
+ * Valid only for IPsec new mode.
+ */
+#define PDBOPTS_ESP_OIHI_PDB_INL 0x0c
+
+/**
+ * PDBOPTS_ESP_OIHI_PDB_REF - Prepend IP header to output frame from PDB
+ * (referenced by pointer).
+ *
+ * Valid only for IPsec new mode.
+ */
+#define PDBOPTS_ESP_OIHI_PDB_REF 0x08
+
+/**
+ * PDBOPTS_ESP_OIHI_IF - Prepend IP header to output frame from input frame
+ *
+ * Valid only for IPsec new mode.
+ */
+#define PDBOPTS_ESP_OIHI_IF 0x04
+
+/**
+ * PDBOPTS_ESP_NAT - Enable RFC 3948 UDP-encapsulated-ESP
+ *
+ * Valid only for IPsec new mode.
+ */
+#define PDBOPTS_ESP_NAT 0x02
+
+/**
+ * PDBOPTS_ESP_NUC - Enable NAT UDP Checksum
+ *
+ * Valid only for IPsec new mode.
+ */
+#define PDBOPTS_ESP_NUC 0x01
+
+/* IPSec ESP Decap PDB options */
+
+/**
+ * PDBOPTS_ESP_ARS_MASK - antireplay window mask
+ */
+#define PDBOPTS_ESP_ARS_MASK 0xc0
+
+/**
+ * PDBOPTS_ESP_ARSNONE - No antireplay window
+ */
+#define PDBOPTS_ESP_ARSNONE 0x00
+
+/**
+ * PDBOPTS_ESP_ARS64 - 64-entry antireplay window
+ */
+#define PDBOPTS_ESP_ARS64 0xc0
+
+/**
+ * PDBOPTS_ESP_ARS128 - 128-entry antireplay window
+ *
+ * Valid only for IPsec new mode.
+ */
+#define PDBOPTS_ESP_ARS128 0x80
+
+/**
+ * PDBOPTS_ESP_ARS32 - 32-entry antireplay window
+ */
+#define PDBOPTS_ESP_ARS32 0x40
+
+/**
+ * PDBOPTS_ESP_VERIFY_CSUM - Validate ip header checksum
+ *
+ * Valid only for IPsec legacy mode.
+ */
+#define PDBOPTS_ESP_VERIFY_CSUM 0x20
+
+/**
+ * PDBOPTS_ESP_TECN - Implement RFC 6040 ECN tunneling from outer header to
+ * inner header.
+ *
+ * Valid only for IPsec new mode.
+ */
+#define PDBOPTS_ESP_TECN 0x20
+
+/**
+ * PDBOPTS_ESP_OUTFMT - Output only decapsulation
+ *
+ * Valid only for IPsec legacy mode.
+ */
+#define PDBOPTS_ESP_OUTFMT 0x08
+
+/**
+ * PDBOPTS_ESP_AOFL - Adjust out frame len
+ *
+ * Valid only for IPsec legacy mode and for SEC >= 5.3.
+ */
+#define PDBOPTS_ESP_AOFL 0x04
+
+/**
+ * PDBOPTS_ESP_ETU - EtherType Update
+ *
+ * Add corresponding ethertype (0x0800 for IPv4, 0x86dd for IPv6) in the output
+ * frame.
+ * Valid only for IPsec new mode.
+ */
+#define PDBOPTS_ESP_ETU 0x01
+
+#define PDBHMO_ESP_DECAP_SHIFT 28
+#define PDBHMO_ESP_ENCAP_SHIFT 28
+#define PDBNH_ESP_ENCAP_SHIFT 16
+#define PDBNH_ESP_ENCAP_MASK (0xff << PDBNH_ESP_ENCAP_SHIFT)
+#define PDBHDRLEN_ESP_DECAP_SHIFT 16
+#define PDBHDRLEN_MASK (0x0fff << PDBHDRLEN_ESP_DECAP_SHIFT)
+#define PDB_NH_OFFSET_SHIFT 8
+#define PDB_NH_OFFSET_MASK (0xff << PDB_NH_OFFSET_SHIFT)
+
+/**
+ * PDBHMO_ESP_DECAP_DTTL - IPsec ESP decrement TTL (IPv4) / Hop limit (IPv6)
+ * HMO option.
+ */
+#define PDBHMO_ESP_DECAP_DTTL (0x02 << PDBHMO_ESP_DECAP_SHIFT)
+
+/**
+ * PDBHMO_ESP_ENCAP_DTTL - IPsec ESP increment TTL (IPv4) / Hop limit (IPv6)
+ * HMO option.
+ */
+#define PDBHMO_ESP_ENCAP_DTTL (0x02 << PDBHMO_ESP_ENCAP_SHIFT)
+
+/**
+ * PDBHMO_ESP_DIFFSERV - (Decap) DiffServ Copy - Copy the IPv4 TOS or IPv6
+ * Traffic Class byte from the outer IP header to the
+ * inner IP header.
+ */
+#define PDBHMO_ESP_DIFFSERV (0x01 << PDBHMO_ESP_DECAP_SHIFT)
+
+/**
+ * PDBHMO_ESP_SNR - (Encap) - Sequence Number Rollover control
+ *
+ * Configures behaviour in case of SN / ESN rollover:
+ * error if SNR = 1, rollover allowed if SNR = 0.
+ * Valid only for IPsec new mode.
+ */
+#define PDBHMO_ESP_SNR (0x01 << PDBHMO_ESP_ENCAP_SHIFT)
+
+/**
+ * PDBHMO_ESP_DFBIT - (Encap) Copy DF bit - if an IPv4 tunnel mode outer IP
+ * header is coming from the PDB, copy the DF bit from the
+ * inner IP header to the outer IP header.
+ */
+#define PDBHMO_ESP_DFBIT (0x04 << PDBHMO_ESP_ENCAP_SHIFT)
+
+/**
+ * PDBHMO_ESP_DFV - (Decap) - DF bit value
+ *
+ * If ODF = 1, DF bit in output frame is replaced by DFV.
+ * Valid only from SEC Era 5 onwards.
+ */
+#define PDBHMO_ESP_DFV (0x04 << PDBHMO_ESP_DECAP_SHIFT)
+
+/**
+ * PDBHMO_ESP_ODF - (Decap) Override DF bit in IPv4 header of decapsulated
+ * output frame.
+ *
+ * If ODF = 1, DF is replaced with the value of DFV bit.
+ * Valid only from SEC Era 5 onwards.
+ */
+#define PDBHMO_ESP_ODF (0x08 << PDBHMO_ESP_DECAP_SHIFT)
+
+/**
+ * struct ipsec_encap_cbc - PDB part for IPsec CBC encapsulation
+ * @iv: 16-byte array initialization vector
+ */
+struct ipsec_encap_cbc {
+ uint8_t iv[16];
+};
+
+
+/**
+ * struct ipsec_encap_ctr - PDB part for IPsec CTR encapsulation
+ * @ctr_nonce: 4-byte array nonce
+ * @ctr_initial: initial count constant
+ * @iv: initialization vector
+ */
+struct ipsec_encap_ctr {
+ uint8_t ctr_nonce[4];
+ uint32_t ctr_initial;
+ uint64_t iv;
+};
+
+/**
+ * struct ipsec_encap_ccm - PDB part for IPsec CCM encapsulation
+ * @salt: 3-byte array salt (lower 24 bits)
+ * @ccm_opt: CCM algorithm options - MSB-LSB description:
+ * b0_flags (8b) - CCM B0; use 0x5B for 8-byte ICV, 0x6B for 12-byte ICV,
+ * 0x7B for 16-byte ICV (cf. RFC4309, RFC3610)
+ * ctr_flags (8b) - counter flags; constant equal to 0x3
+ * ctr_initial (16b) - initial count constant
+ * @iv: initialization vector
+ */
+struct ipsec_encap_ccm {
+ uint8_t salt[4];
+ uint32_t ccm_opt;
+ uint64_t iv;
+};
+
+/**
+ * struct ipsec_encap_gcm - PDB part for IPsec GCM encapsulation
+ * @salt: 3-byte array salt (lower 24 bits)
+ * @rsvd: reserved, do not use
+ * @iv: initialization vector
+ */
+struct ipsec_encap_gcm {
+ uint8_t salt[4];
+ uint32_t rsvd;
+ uint64_t iv;
+};
+
+/**
+ * struct ipsec_encap_pdb - PDB for IPsec encapsulation
+ * @options: MSB-LSB description (both for legacy and new modes)
+ * hmo (header manipulation options) - 4b
+ * reserved - 4b
+ * next header (legacy) / reserved (new) - 8b
+ * next header offset (legacy) / AOIPHO (actual outer IP header offset) - 8b
+ * option flags (depend on selected algorithm) - 8b
+ * @seq_num_ext_hi: (optional) IPsec Extended Sequence Number (ESN)
+ * @seq_num: IPsec sequence number
+ * @spi: IPsec SPI (Security Parameters Index)
+ * @ip_hdr_len: optional IP Header length (in bytes)
+ * reserved - 16b
+ * Opt. IP Hdr Len - 16b
+ * @ip_hdr: optional IP Header content (only for IPsec legacy mode)
+ */
+struct ipsec_encap_pdb {
+ uint32_t options;
+ uint32_t seq_num_ext_hi;
+ uint32_t seq_num;
+ union {
+ struct ipsec_encap_cbc cbc;
+ struct ipsec_encap_ctr ctr;
+ struct ipsec_encap_ccm ccm;
+ struct ipsec_encap_gcm gcm;
+ };
+ uint32_t spi;
+ uint32_t ip_hdr_len;
+ uint8_t ip_hdr[0];
+};
+
+static inline unsigned int
+__rta_copy_ipsec_encap_pdb(struct program *program,
+ struct ipsec_encap_pdb *pdb,
+ uint32_t algtype)
+{
+ unsigned int start_pc = program->current_pc;
+
+ __rta_out32(program, pdb->options);
+ __rta_out32(program, pdb->seq_num_ext_hi);
+ __rta_out32(program, pdb->seq_num);
+
+ switch (algtype & OP_PCL_IPSEC_CIPHER_MASK) {
+ case OP_PCL_IPSEC_DES_IV64:
+ case OP_PCL_IPSEC_DES:
+ case OP_PCL_IPSEC_3DES:
+ case OP_PCL_IPSEC_AES_CBC:
+ case OP_PCL_IPSEC_NULL:
+ rta_copy_data(program, pdb->cbc.iv, sizeof(pdb->cbc.iv));
+ break;
+
+ case OP_PCL_IPSEC_AES_CTR:
+ rta_copy_data(program, pdb->ctr.ctr_nonce,
+ sizeof(pdb->ctr.ctr_nonce));
+ __rta_out32(program, pdb->ctr.ctr_initial);
+ __rta_out64(program, true, pdb->ctr.iv);
+ break;
+
+ case OP_PCL_IPSEC_AES_CCM8:
+ case OP_PCL_IPSEC_AES_CCM12:
+ case OP_PCL_IPSEC_AES_CCM16:
+ rta_copy_data(program, pdb->ccm.salt, sizeof(pdb->ccm.salt));
+ __rta_out32(program, pdb->ccm.ccm_opt);
+ __rta_out64(program, true, pdb->ccm.iv);
+ break;
+
+ case OP_PCL_IPSEC_AES_GCM8:
+ case OP_PCL_IPSEC_AES_GCM12:
+ case OP_PCL_IPSEC_AES_GCM16:
+ case OP_PCL_IPSEC_AES_NULL_WITH_GMAC:
+ rta_copy_data(program, pdb->gcm.salt, sizeof(pdb->gcm.salt));
+ __rta_out32(program, pdb->gcm.rsvd);
+ __rta_out64(program, true, pdb->gcm.iv);
+ break;
+ }
+
+ __rta_out32(program, pdb->spi);
+ __rta_out32(program, pdb->ip_hdr_len);
+
+ return start_pc;
+}
+
+/**
+ * struct ipsec_decap_cbc - PDB part for IPsec CBC decapsulation
+ * @rsvd: reserved, do not use
+ */
+struct ipsec_decap_cbc {
+ uint32_t rsvd[2];
+};
+
+/**
+ * struct ipsec_decap_ctr - PDB part for IPsec CTR decapsulation
+ * @ctr_nonce: 4-byte array nonce
+ * @ctr_initial: initial count constant
+ */
+struct ipsec_decap_ctr {
+ uint8_t ctr_nonce[4];
+ uint32_t ctr_initial;
+};
+
+/**
+ * struct ipsec_decap_ccm - PDB part for IPsec CCM decapsulation
+ * @salt: 3-byte salt (lower 24 bits)
+ * @ccm_opt: CCM algorithm options - MSB-LSB description:
+ * b0_flags (8b) - CCM B0; use 0x5B for 8-byte ICV, 0x6B for 12-byte ICV,
+ * 0x7B for 16-byte ICV (cf. RFC4309, RFC3610)
+ * ctr_flags (8b) - counter flags; constant equal to 0x3
+ * ctr_initial (16b) - initial count constant
+ */
+struct ipsec_decap_ccm {
+ uint8_t salt[4];
+ uint32_t ccm_opt;
+};
+
+/**
+ * struct ipsec_decap_gcm - PDB part for IPsec GCM decapsulation
+ * @salt: 4-byte salt
+ * @rsvd: reserved, do not use
+ */
+struct ipsec_decap_gcm {
+ uint8_t salt[4];
+ uint32_t rsvd;
+};
+
+/**
+ * struct ipsec_decap_pdb - PDB for IPsec decapsulation
+ * @options: MSB-LSB description (both for legacy and new modes)
+ * hmo (header manipulation options) - 4b
+ * IP header length - 12b
+ * next header offset (legacy) / AOIPHO (actual outer IP header offset) - 8b
+ * option flags (depend on selected algorithm) - 8b
+ * @seq_num_ext_hi: (optional) IPsec Extended Sequence Number (ESN)
+ * @seq_num: IPsec sequence number
+ * @anti_replay: Anti-replay window; size depends on ARS (option flags);
+ * format must be Big Endian, irrespective of platform
+ */
+struct ipsec_decap_pdb {
+ uint32_t options;
+ union {
+ struct ipsec_decap_cbc cbc;
+ struct ipsec_decap_ctr ctr;
+ struct ipsec_decap_ccm ccm;
+ struct ipsec_decap_gcm gcm;
+ };
+ uint32_t seq_num_ext_hi;
+ uint32_t seq_num;
+ uint32_t anti_replay[4];
+};
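+
+/*
+ * Illustrative sketch (not part of the original header): one way a caller
+ * might initialize the decapsulation PDB with a 64-entry anti-replay window
+ * before passing it to one of the cnstr_shdsc_ipsec_*decap constructors
+ * below. All field values are hypothetical placeholders.
+ *
+ *	struct ipsec_decap_pdb pdb;
+ *
+ *	memset(&pdb, 0, sizeof(pdb));
+ *	pdb.options = PDBOPTS_ESP_ESN | PDBOPTS_ESP_ARS64;
+ *	pdb.seq_num = 0;
+ *	// anti_replay[] is emitted as big-endian words by
+ *	// __rta_copy_ipsec_decap_pdb(); a fresh SA starts from all zeroes
+ *	memset(pdb.anti_replay, 0, sizeof(pdb.anti_replay));
+ */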
+
+static inline unsigned int
+__rta_copy_ipsec_decap_pdb(struct program *program,
+ struct ipsec_decap_pdb *pdb,
+ uint32_t algtype)
+{
+ unsigned int start_pc = program->current_pc;
+ unsigned int i, ars;
+
+ __rta_out32(program, pdb->options);
+
+ switch (algtype & OP_PCL_IPSEC_CIPHER_MASK) {
+ case OP_PCL_IPSEC_DES_IV64:
+ case OP_PCL_IPSEC_DES:
+ case OP_PCL_IPSEC_3DES:
+ case OP_PCL_IPSEC_AES_CBC:
+ case OP_PCL_IPSEC_NULL:
+ __rta_out32(program, pdb->cbc.rsvd[0]);
+ __rta_out32(program, pdb->cbc.rsvd[1]);
+ break;
+
+ case OP_PCL_IPSEC_AES_CTR:
+ rta_copy_data(program, pdb->ctr.ctr_nonce,
+ sizeof(pdb->ctr.ctr_nonce));
+ __rta_out32(program, pdb->ctr.ctr_initial);
+ break;
+
+ case OP_PCL_IPSEC_AES_CCM8:
+ case OP_PCL_IPSEC_AES_CCM12:
+ case OP_PCL_IPSEC_AES_CCM16:
+ rta_copy_data(program, pdb->ccm.salt, sizeof(pdb->ccm.salt));
+ __rta_out32(program, pdb->ccm.ccm_opt);
+ break;
+
+ case OP_PCL_IPSEC_AES_GCM8:
+ case OP_PCL_IPSEC_AES_GCM12:
+ case OP_PCL_IPSEC_AES_GCM16:
+ case OP_PCL_IPSEC_AES_NULL_WITH_GMAC:
+ rta_copy_data(program, pdb->gcm.salt, sizeof(pdb->gcm.salt));
+ __rta_out32(program, pdb->gcm.rsvd);
+ break;
+ }
+
+ __rta_out32(program, pdb->seq_num_ext_hi);
+ __rta_out32(program, pdb->seq_num);
+
+ switch (pdb->options & PDBOPTS_ESP_ARS_MASK) {
+ case PDBOPTS_ESP_ARS128:
+ ars = 4;
+ break;
+ case PDBOPTS_ESP_ARS64:
+ ars = 2;
+ break;
+ case PDBOPTS_ESP_ARS32:
+ ars = 1;
+ break;
+ case PDBOPTS_ESP_ARSNONE:
+ default:
+ ars = 0;
+ break;
+ }
+
+ for (i = 0; i < ars; i++)
+ __rta_out_be32(program, pdb->anti_replay[i]);
+
+ return start_pc;
+}
+
+/**
+ * enum ipsec_icv_size - Type selectors for icv size in IPsec protocol
+ * @IPSEC_ICV_MD5_SIZE: full-length MD5 ICV
+ * @IPSEC_ICV_MD5_TRUNC_SIZE: truncated MD5 ICV
+ */
+enum ipsec_icv_size {
+ IPSEC_ICV_MD5_SIZE = 16,
+ IPSEC_ICV_MD5_TRUNC_SIZE = 12
+};
+
+/*
+ * IPSec ESP Datapath Protocol Override Register (DPOVRD)
+ */
+
+#define IPSEC_DECO_DPOVRD_USE 0x80
+
+struct ipsec_deco_dpovrd {
+ uint8_t ovrd_ecn;
+ uint8_t ip_hdr_len;
+ uint8_t nh_offset;
+ union {
+ uint8_t next_header; /* next header if encap */
+ uint8_t rsvd; /* reserved if decap */
+ };
+};
+
+struct ipsec_new_encap_deco_dpovrd {
+#define IPSEC_NEW_ENCAP_DECO_DPOVRD_USE 0x8000
+ uint16_t ovrd_ip_hdr_len; /* OVRD + outer IP header material
+ * length
+ */
+#define IPSEC_NEW_ENCAP_OIMIF 0x80
+ uint8_t oimif_aoipho; /* OIMIF + actual outer IP header
+ * offset
+ */
+ uint8_t rsvd;
+};
+
+struct ipsec_new_decap_deco_dpovrd {
+ uint8_t ovrd;
+ uint8_t aoipho_hi; /* upper nibble of actual outer IP
+ * header
+ */
+ uint16_t aoipho_lo_ip_hdr_len; /* lower nibble of actual outer IP
+ * header + outer IP header material
+ */
+};
+
+static inline void
+__gen_auth_key(struct program *program, struct alginfo *authdata)
+{
+ uint32_t dkp_protid;
+
+ switch (authdata->algtype & OP_PCL_IPSEC_AUTH_MASK) {
+ case OP_PCL_IPSEC_HMAC_MD5_96:
+ case OP_PCL_IPSEC_HMAC_MD5_128:
+ dkp_protid = OP_PCLID_DKP_MD5;
+ break;
+ case OP_PCL_IPSEC_HMAC_SHA1_96:
+ case OP_PCL_IPSEC_HMAC_SHA1_160:
+ dkp_protid = OP_PCLID_DKP_SHA1;
+ break;
+ case OP_PCL_IPSEC_HMAC_SHA2_256_128:
+ dkp_protid = OP_PCLID_DKP_SHA256;
+ break;
+ case OP_PCL_IPSEC_HMAC_SHA2_384_192:
+ dkp_protid = OP_PCLID_DKP_SHA384;
+ break;
+ case OP_PCL_IPSEC_HMAC_SHA2_512_256:
+ dkp_protid = OP_PCLID_DKP_SHA512;
+ break;
+ default:
+ KEY(program, KEY2, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+ return;
+ }
+
+ if (authdata->key_type == RTA_DATA_PTR)
+ DKP_PROTOCOL(program, dkp_protid, OP_PCL_DKP_SRC_PTR,
+ OP_PCL_DKP_DST_PTR, (uint16_t)authdata->keylen,
+ authdata->key, authdata->key_type);
+ else
+ DKP_PROTOCOL(program, dkp_protid, OP_PCL_DKP_SRC_IMM,
+ OP_PCL_DKP_DST_IMM, (uint16_t)authdata->keylen,
+ authdata->key, authdata->key_type);
+}
+
+/**
+ * cnstr_shdsc_ipsec_encap - IPSec ESP encapsulation protocol-level shared
+ * descriptor.
+ * @descbuf: pointer to buffer used for descriptor construction
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: if true, perform descriptor byte swapping on a 4-byte boundary
+ * @pdb: pointer to the PDB to be used with this descriptor
+ * This structure will be copied inline to the descriptor under
+ * construction. No error checking will be made. Refer to the
+ * block guide for details of the encapsulation PDB.
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_PCL_IPSEC_*
+ * @authdata: pointer to authentication transform definitions
+ * If an authentication key is required by the protocol:
+ * -For SEC Eras 1-5, an MDHA split key must be provided;
+ * Note that the size of the split key itself must be specified.
+ * -For SEC Eras 6+, a "normal" key must be provided; DKP (Derived
+ * Key Protocol) will be used to compute MDHA on the fly in HW.
+ * Valid algorithm values - one of OP_PCL_IPSEC_*
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_ipsec_encap(uint32_t *descbuf, bool ps, bool swap,
+ struct ipsec_encap_pdb *pdb,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata)
+{
+ struct program prg;
+ struct program *p = &prg;
+
+ LABEL(keyjmp);
+ REFERENCE(pkeyjmp);
+ LABEL(hdr);
+ REFERENCE(phdr);
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ phdr = SHR_HDR(p, SHR_SERIAL, hdr, 0);
+ __rta_copy_ipsec_encap_pdb(p, pdb, cipherdata->algtype);
+ COPY_DATA(p, pdb->ip_hdr, pdb->ip_hdr_len);
+ SET_LABEL(p, hdr);
+ pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, BOTH|SHRD);
+ if (authdata->keylen) {
+ if (rta_sec_era < RTA_SEC_ERA_6)
+ KEY(p, MDHA_SPLIT_KEY, authdata->key_enc_flags,
+ authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+ else
+ __gen_auth_key(p, authdata);
+ }
+ if (cipherdata->keylen)
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ SET_LABEL(p, keyjmp);
+ PROTOCOL(p, OP_TYPE_ENCAP_PROTOCOL,
+ OP_PCLID_IPSEC,
+ (uint16_t)(cipherdata->algtype | authdata->algtype));
+ PATCH_JUMP(p, pkeyjmp, keyjmp);
+ PATCH_HDR(p, phdr, hdr);
+ return PROGRAM_FINALIZE(p);
+}
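+
+/*
+ * Illustrative sketch (not part of the original header): a hypothetical
+ * caller building a legacy-mode ESP tunnel encapsulation shared descriptor
+ * with AES-CBC and HMAC-SHA1-96. Buffer size, swap_needed, key addresses and
+ * key lengths are placeholders; struct alginfo fields follow the declaration
+ * in sec_run_time_asm.h. On SEC Eras 1-5 the authentication key must already
+ * be an MDHA split key, as noted above.
+ *
+ *	uint32_t descbuf[64];
+ *	struct ipsec_encap_pdb pdb;
+ *	struct alginfo cipher = { 0 }, auth = { 0 };
+ *
+ *	memset(&pdb, 0, sizeof(pdb));
+ *	pdb.options = PDBOPTS_ESP_TUNNEL | PDBOPTS_ESP_IVSRC;
+ *	pdb.spi = 0x1234;
+ *
+ *	cipher.algtype = OP_PCL_IPSEC_AES_CBC;
+ *	cipher.key = (uint64_t)(uintptr_t)aes_key;	// hypothetical buffer
+ *	cipher.keylen = 16;
+ *	cipher.key_type = RTA_DATA_IMM;
+ *
+ *	auth.algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
+ *	auth.key = (uint64_t)(uintptr_t)split_key;	// hypothetical buffer
+ *	auth.keylen = split_keylen;
+ *	auth.key_type = RTA_DATA_IMM;
+ *
+ *	return cnstr_shdsc_ipsec_encap(descbuf, false, swap_needed,
+ *				       &pdb, &cipher, &auth);
+ */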
+
+/**
+ * cnstr_shdsc_ipsec_decap - IPSec ESP decapsulation protocol-level shared
+ * descriptor.
+ * @descbuf: pointer to buffer used for descriptor construction
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: if true, perform descriptor byte swapping on a 4-byte boundary
+ * @pdb: pointer to the PDB to be used with this descriptor
+ * This structure will be copied inline to the descriptor under
+ * construction. No error checking will be made. Refer to the
+ * block guide for details about the decapsulation PDB.
+ * @cipherdata: pointer to block cipher transform definitions.
+ * Valid algorithm values - one of OP_PCL_IPSEC_*
+ * @authdata: pointer to authentication transform definitions
+ * If an authentication key is required by the protocol:
+ * -For SEC Eras 1-5, an MDHA split key must be provided;
+ * Note that the size of the split key itself must be specified.
+ * -For SEC Eras 6+, a "normal" key must be provided; DKP (Derived
+ * Key Protocol) will be used to compute MDHA on the fly in HW.
+ * Valid algorithm values - one of OP_PCL_IPSEC_*
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_ipsec_decap(uint32_t *descbuf, bool ps, bool swap,
+ struct ipsec_decap_pdb *pdb,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata)
+{
+ struct program prg;
+ struct program *p = &prg;
+
+ LABEL(keyjmp);
+ REFERENCE(pkeyjmp);
+ LABEL(hdr);
+ REFERENCE(phdr);
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ phdr = SHR_HDR(p, SHR_SERIAL, hdr, 0);
+ __rta_copy_ipsec_decap_pdb(p, pdb, cipherdata->algtype);
+ SET_LABEL(p, hdr);
+ pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, BOTH|SHRD);
+ if (authdata->keylen) {
+ if (rta_sec_era < RTA_SEC_ERA_6)
+ KEY(p, MDHA_SPLIT_KEY, authdata->key_enc_flags,
+ authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+ else
+ __gen_auth_key(p, authdata);
+ }
+ if (cipherdata->keylen)
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ SET_LABEL(p, keyjmp);
+ PROTOCOL(p, OP_TYPE_DECAP_PROTOCOL,
+ OP_PCLID_IPSEC,
+ (uint16_t)(cipherdata->algtype | authdata->algtype));
+ PATCH_JUMP(p, pkeyjmp, keyjmp);
+ PATCH_HDR(p, phdr, hdr);
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * cnstr_shdsc_ipsec_encap_des_aes_xcbc - IPSec DES-CBC/3DES-CBC and
+ * AES-XCBC-MAC-96 ESP encapsulation shared descriptor.
+ * @descbuf: pointer to buffer used for descriptor construction
+ * @pdb: pointer to the PDB to be used with this descriptor
+ * This structure will be copied inline to the descriptor under
+ * construction. No error checking will be made. Refer to the
+ * block guide for details of the encapsulation PDB.
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_PCL_IPSEC_DES, OP_PCL_IPSEC_3DES.
+ * @authdata: pointer to authentication transform definitions
+ * Valid algorithm value: OP_PCL_IPSEC_AES_XCBC_MAC_96.
+ *
+ * Supported only for platforms with 32-bit address pointers and SEC ERA 4 or
+ * higher. The tunnel/transport mode of the IPsec ESP is supported only if the
+ * Outer/Transport IP Header is present in the encapsulation output packet.
+ * The descriptor performs DES-CBC/3DES-CBC & HMAC-MD5-96 and then rereads
+ * the input packet to do the AES-XCBC-MAC-96 calculation and to overwrite
+ * the MD5 ICV.
+ * The descriptor uses all the benefits of the built-in protocol by computing
+ * the IPsec ESP with a hardware supported algorithms combination
+ * (DES-CBC/3DES-CBC & HMAC-MD5-96). The HMAC-MD5 authentication algorithm
+ * was chosen in order to speed up the computational time for this intermediate
+ * step.
+ * Warning: The user must allocate at least 32 bytes for the authentication key
+ * (in order to use it also with HMAC-MD5-96), even when using a shorter key
+ * for the AES-XCBC-MAC-96.
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_ipsec_encap_des_aes_xcbc(uint32_t *descbuf,
+ struct ipsec_encap_pdb *pdb,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata)
+{
+ struct program prg;
+ struct program *p = &prg;
+
+ LABEL(hdr);
+ LABEL(shd_ptr);
+ LABEL(keyjump);
+ LABEL(outptr);
+ LABEL(swapped_seqin_fields);
+ LABEL(swapped_seqin_ptr);
+ REFERENCE(phdr);
+ REFERENCE(pkeyjump);
+ REFERENCE(move_outlen);
+ REFERENCE(move_seqout_ptr);
+ REFERENCE(swapped_seqin_ptr_jump);
+ REFERENCE(write_swapped_seqin_ptr);
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ phdr = SHR_HDR(p, SHR_SERIAL, hdr, 0);
+ __rta_copy_ipsec_encap_pdb(p, pdb, cipherdata->algtype);
+ COPY_DATA(p, pdb->ip_hdr, pdb->ip_hdr_len);
+ SET_LABEL(p, hdr);
+ pkeyjump = JUMP(p, keyjump, LOCAL_JUMP, ALL_TRUE, SHRD | SELF);
+ /*
+ * Hard-coded KEY arguments. The descriptor uses all the benefits of
+ * the built-in protocol by computing the IPsec ESP with a hardware
+ * supported algorithms combination (DES-CBC/3DES-CBC & HMAC-MD5-96).
+ * The HMAC-MD5 authentication algorithm was chosen with
+ * the keys options from below in order to speed up the computational
+ * time for this intermediate step.
+ * Warning: The user must allocate at least 32 bytes for
+ * the authentication key (in order to use it also with HMAC-MD5-96),
+ * even when using a shorter key for the AES-XCBC-MAC-96.
+ */
+ KEY(p, MDHA_SPLIT_KEY, 0, authdata->key, 32, INLINE_KEY(authdata));
+ SET_LABEL(p, keyjump);
+ LOAD(p, LDST_SRCDST_WORD_CLRW | CLRW_CLR_C1MODE | CLRW_CLR_C1DATAS |
+ CLRW_CLR_C1CTX | CLRW_CLR_C1KEY | CLRW_RESET_CLS1_CHA, CLRW, 0, 4,
+ IMMED);
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ PROTOCOL(p, OP_TYPE_ENCAP_PROTOCOL, OP_PCLID_IPSEC,
+ (uint16_t)(cipherdata->algtype | OP_PCL_IPSEC_HMAC_MD5_96));
+ /* Swap SEQINPTR to SEQOUTPTR. */
+ move_seqout_ptr = MOVE(p, DESCBUF, 0, MATH1, 0, 16, WAITCOMP | IMMED);
+ MATHB(p, MATH1, AND, ~(CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR), MATH1,
+ 8, IFB | IMMED2);
+/*
+ * TODO: RTA currently doesn't support creating a LOAD command
+ * with another command as IMM.
+ * To be changed when proper support is added in RTA.
+ */
+ LOAD(p, 0xa00000e5, MATH3, 4, 4, IMMED);
+ MATHB(p, MATH3, SHLD, MATH3, MATH3, 8, 0);
+ write_swapped_seqin_ptr = MOVE(p, MATH1, 0, DESCBUF, 0, 20, WAITCOMP |
+ IMMED);
+ swapped_seqin_ptr_jump = JUMP(p, swapped_seqin_ptr, LOCAL_JUMP,
+ ALL_TRUE, 0);
+ LOAD(p, LDST_SRCDST_WORD_CLRW | CLRW_CLR_C1MODE | CLRW_CLR_C1DATAS |
+ CLRW_CLR_C1CTX | CLRW_CLR_C1KEY | CLRW_RESET_CLS1_CHA, CLRW, 0, 4,
+ 0);
+ SEQOUTPTR(p, 0, 65535, RTO);
+ move_outlen = MOVE(p, DESCBUF, 0, MATH0, 4, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH0, SUB,
+ (uint64_t)(pdb->ip_hdr_len + IPSEC_ICV_MD5_TRUNC_SIZE),
+ VSEQINSZ, 4, IMMED2);
+ MATHB(p, MATH0, SUB, IPSEC_ICV_MD5_TRUNC_SIZE, VSEQOUTSZ, 4, IMMED2);
+ KEY(p, KEY1, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ 0);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES, OP_ALG_AAI_XCBC_MAC,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, DIR_ENC);
+ SEQFIFOLOAD(p, SKIP, pdb->ip_hdr_len, 0);
+ SEQFIFOLOAD(p, MSG1, 0, VLF | FLUSH1 | LAST1);
+ SEQFIFOSTORE(p, SKIP, 0, 0, VLF);
+ SEQSTORE(p, CONTEXT1, 0, IPSEC_ICV_MD5_TRUNC_SIZE, 0);
+/*
+ * TODO: RTA currently doesn't support adding labels in or after Job Descriptor.
+ * To be changed when proper support is added in RTA.
+ */
+ /* Label the Shared Descriptor Pointer */
+ SET_LABEL(p, shd_ptr);
+ shd_ptr += 1;
+ /* Label the Output Pointer */
+ SET_LABEL(p, outptr);
+ outptr += 3;
+ /* Label the first word after JD */
+ SET_LABEL(p, swapped_seqin_fields);
+ swapped_seqin_fields += 8;
+ /* Label the second word after JD */
+ SET_LABEL(p, swapped_seqin_ptr);
+ swapped_seqin_ptr += 9;
+
+ PATCH_HDR(p, phdr, hdr);
+ PATCH_JUMP(p, pkeyjump, keyjump);
+ PATCH_JUMP(p, swapped_seqin_ptr_jump, swapped_seqin_ptr);
+ PATCH_MOVE(p, move_outlen, outptr);
+ PATCH_MOVE(p, move_seqout_ptr, shd_ptr);
+ PATCH_MOVE(p, write_swapped_seqin_ptr, swapped_seqin_fields);
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * cnstr_shdsc_ipsec_decap_des_aes_xcbc - IPSec DES-CBC/3DES-CBC and
+ * AES-XCBC-MAC-96 ESP decapsulation shared descriptor.
+ * @descbuf: pointer to buffer used for descriptor construction
+ * @pdb: pointer to the PDB to be used with this descriptor
+ * This structure will be copied inline to the descriptor under
+ * construction. No error checking will be made. Refer to the
+ * block guide for details of the decapsulation PDB.
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_PCL_IPSEC_DES, OP_PCL_IPSEC_3DES.
+ * @authdata: pointer to authentication transform definitions
+ * Valid algorithm value: OP_PCL_IPSEC_AES_XCBC_MAC_96.
+ *
+ * Supported only for platforms with 32-bit address pointers and SEC ERA 4 or
+ * higher. The tunnel/transport mode of the IPsec ESP is supported only if the
+ * Outer/Transport IP Header is present in the decapsulation input packet.
+ * The descriptor computes the AES-XCBC-MAC-96 to check if the received ICV
+ * is correct, rereads the input packet to compute the MD5 ICV, overwrites
+ * the XCBC ICV, and then sends the modified input packet to the
+ * DES-CBC/3DES-CBC & HMAC-MD5-96 IPsec.
+ * The descriptor uses all the benefits of the built-in protocol by computing
+ * the IPsec ESP with a hardware supported algorithms combination
+ * (DES-CBC/3DES-CBC & HMAC-MD5-96). The HMAC-MD5 authentication algorithm
+ * was chosen in order to speed up the computational time for this intermediate
+ * step.
+ * Warning: The user must allocate at least 32 bytes for the authentication key
+ * (in order to use it also with HMAC-MD5-96), even when using a shorter key
+ * for the AES-XCBC-MAC-96.
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_ipsec_decap_des_aes_xcbc(uint32_t *descbuf,
+ struct ipsec_decap_pdb *pdb,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata)
+{
+ struct program prg;
+ struct program *p = &prg;
+ uint32_t ip_hdr_len = (pdb->options & PDBHDRLEN_MASK) >>
+ PDBHDRLEN_ESP_DECAP_SHIFT;
+
+ LABEL(hdr);
+ LABEL(jump_cmd);
+ LABEL(keyjump);
+ LABEL(outlen);
+ LABEL(seqin_ptr);
+ LABEL(seqout_ptr);
+ LABEL(swapped_seqout_fields);
+ LABEL(swapped_seqout_ptr);
+ REFERENCE(seqout_ptr_jump);
+ REFERENCE(phdr);
+ REFERENCE(pkeyjump);
+ REFERENCE(move_jump);
+ REFERENCE(move_jump_back);
+ REFERENCE(move_seqin_ptr);
+ REFERENCE(swapped_seqout_ptr_jump);
+ REFERENCE(write_swapped_seqout_ptr);
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ phdr = SHR_HDR(p, SHR_SERIAL, hdr, 0);
+ __rta_copy_ipsec_decap_pdb(p, pdb, cipherdata->algtype);
+ SET_LABEL(p, hdr);
+ pkeyjump = JUMP(p, keyjump, LOCAL_JUMP, ALL_TRUE, SHRD | SELF);
+ /*
+ * Hard-coded KEY arguments. The descriptor uses all the benefits of
+ * the built-in protocol by computing the IPsec ESP with a hardware
+ * supported algorithms combination (DES-CBC/3DES-CBC & HMAC-MD5-96).
+ * The HMAC-MD5 authentication algorithm was chosen with
+ * the keys options from below in order to speed up the computational
+ * time for this intermediate step.
+ * Warning: The user must allocate at least 32 bytes for
+ * the authentication key (in order to use it also with HMAC-MD5-96),
+ * even when using a shorter key for the AES-XCBC-MAC-96.
+ */
+ KEY(p, MDHA_SPLIT_KEY, 0, authdata->key, 32, INLINE_KEY(authdata));
+ SET_LABEL(p, keyjump);
+ LOAD(p, LDST_SRCDST_WORD_CLRW | CLRW_CLR_C1MODE | CLRW_CLR_C1DATAS |
+ CLRW_CLR_C1CTX | CLRW_CLR_C1KEY | CLRW_RESET_CLS1_CHA, CLRW, 0, 4,
+ 0);
+ KEY(p, KEY1, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+ MATHB(p, SEQINSZ, SUB,
+ (uint64_t)(ip_hdr_len + IPSEC_ICV_MD5_TRUNC_SIZE), MATH0, 4,
+ IMMED2);
+ MATHB(p, MATH0, SUB, ZERO, VSEQINSZ, 4, 0);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_MD5, OP_ALG_AAI_HMAC_PRECOMP,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, DIR_ENC);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES, OP_ALG_AAI_XCBC_MAC,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_ENABLE, DIR_DEC);
+ SEQFIFOLOAD(p, SKIP, ip_hdr_len, 0);
+ SEQFIFOLOAD(p, MSG1, 0, VLF | FLUSH1);
+ SEQFIFOLOAD(p, ICV1, IPSEC_ICV_MD5_TRUNC_SIZE, FLUSH1 | LAST1);
+ /* Swap SEQOUTPTR to SEQINPTR. */
+ move_seqin_ptr = MOVE(p, DESCBUF, 0, MATH1, 0, 16, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR, MATH1, 8,
+ IFB | IMMED2);
+/*
+ * TODO: RTA currently doesn't support creating a LOAD command
+ * with another command as IMM.
+ * To be changed when proper support is added in RTA.
+ */
+ LOAD(p, 0xA00000e1, MATH3, 4, 4, IMMED);
+ MATHB(p, MATH3, SHLD, MATH3, MATH3, 8, 0);
+ write_swapped_seqout_ptr = MOVE(p, MATH1, 0, DESCBUF, 0, 20, WAITCOMP |
+ IMMED);
+ swapped_seqout_ptr_jump = JUMP(p, swapped_seqout_ptr, LOCAL_JUMP,
+ ALL_TRUE, 0);
+/*
+ * TODO: To be changed when proper support is added in RTA (can't load
+ * a command that is also written by RTA).
+ * Change when proper RTA support is added.
+ */
+ SET_LABEL(p, jump_cmd);
+ WORD(p, 0xA00000f3);
+ SEQINPTR(p, 0, 65535, RTO);
+ MATHB(p, MATH0, SUB, ZERO, VSEQINSZ, 4, 0);
+ MATHB(p, MATH0, ADD, ip_hdr_len, VSEQOUTSZ, 4, IMMED2);
+ move_jump = MOVE(p, DESCBUF, 0, OFIFO, 0, 8, WAITCOMP | IMMED);
+ move_jump_back = MOVE(p, OFIFO, 0, DESCBUF, 0, 8, IMMED);
+ SEQFIFOLOAD(p, SKIP, ip_hdr_len, 0);
+ SEQFIFOLOAD(p, MSG2, 0, VLF | LAST2);
+ SEQFIFOSTORE(p, SKIP, 0, 0, VLF);
+ SEQSTORE(p, CONTEXT2, 0, IPSEC_ICV_MD5_TRUNC_SIZE, 0);
+ seqout_ptr_jump = JUMP(p, seqout_ptr, LOCAL_JUMP, ALL_TRUE, CALM);
+
+ LOAD(p, LDST_SRCDST_WORD_CLRW | CLRW_CLR_C1MODE | CLRW_CLR_C1DATAS |
+ CLRW_CLR_C1CTX | CLRW_CLR_C1KEY | CLRW_CLR_C2MODE |
+ CLRW_CLR_C2DATAS | CLRW_CLR_C2CTX | CLRW_RESET_CLS1_CHA, CLRW, 0,
+ 4, 0);
+ SEQINPTR(p, 0, 65535, RTO);
+ MATHB(p, MATH0, ADD,
+ (uint64_t)(ip_hdr_len + IPSEC_ICV_MD5_TRUNC_SIZE), SEQINSZ, 4,
+ IMMED2);
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ PROTOCOL(p, OP_TYPE_DECAP_PROTOCOL, OP_PCLID_IPSEC,
+ (uint16_t)(cipherdata->algtype | OP_PCL_IPSEC_HMAC_MD5_96));
+/*
+ * TODO: RTA currently doesn't support adding labels in or after Job Descriptor.
+ * To be changed when proper support is added in RTA.
+ */
+ /* Label the SEQ OUT PTR */
+ SET_LABEL(p, seqout_ptr);
+ seqout_ptr += 2;
+ /* Label the Output Length */
+ SET_LABEL(p, outlen);
+ outlen += 4;
+ /* Label the SEQ IN PTR */
+ SET_LABEL(p, seqin_ptr);
+ seqin_ptr += 5;
+ /* Label the first word after JD */
+ SET_LABEL(p, swapped_seqout_fields);
+ swapped_seqout_fields += 8;
+ /* Label the second word after JD */
+ SET_LABEL(p, swapped_seqout_ptr);
+ swapped_seqout_ptr += 9;
+
+ PATCH_HDR(p, phdr, hdr);
+ PATCH_JUMP(p, pkeyjump, keyjump);
+ PATCH_JUMP(p, seqout_ptr_jump, seqout_ptr);
+ PATCH_JUMP(p, swapped_seqout_ptr_jump, swapped_seqout_ptr);
+ PATCH_MOVE(p, move_jump, jump_cmd);
+ PATCH_MOVE(p, move_jump_back, seqin_ptr);
+ PATCH_MOVE(p, move_seqin_ptr, outlen);
+ PATCH_MOVE(p, write_swapped_seqout_ptr, swapped_seqout_fields);
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * IPSEC_NEW_ENC_BASE_DESC_LEN - IPsec new mode encap shared descriptor length
+ *
+ * Accounts only for the "base" commands and is intended to be used by upper
+ * layers to determine whether Outer IP Header and/or keys can be inlined or
+ * not. To be used as first parameter of rta_inline_query().
+ */
+#define IPSEC_NEW_ENC_BASE_DESC_LEN (5 * CAAM_CMD_SZ + \
+ sizeof(struct ipsec_encap_pdb))
+
+/**
+ * IPSEC_NEW_NULL_ENC_BASE_DESC_LEN - IPsec new mode encap shared descriptor
+ * length for the case of
+ * NULL encryption / authentication
+ *
+ * Accounts only for the "base" commands and is intended to be used by upper
+ * layers to determine whether Outer IP Header and/or key can be inlined or
+ * not. To be used as first parameter of rta_inline_query().
+ */
+#define IPSEC_NEW_NULL_ENC_BASE_DESC_LEN (4 * CAAM_CMD_SZ + \
+ sizeof(struct ipsec_encap_pdb))
+
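+/*
+ * Illustrative sketch (not part of the original header): using the base
+ * length above to decide whether the keys of a new-mode encap descriptor fit
+ * inline. It assumes the rta_inline_query() helper from sec_run_time_asm.h
+ * (base descriptor length, job descriptor overhead, array of candidate data
+ * lengths, output inline mask, count); key lengths are placeholders.
+ *
+ *	unsigned int data_len[2] = { auth_keylen, cipher_keylen };
+ *	uint32_t inl_mask = 0;
+ *
+ *	if (rta_inline_query(IPSEC_NEW_ENC_BASE_DESC_LEN, 0, data_len,
+ *			     &inl_mask, 2) < 0)
+ *		return -EINVAL;
+ *	// bit i of inl_mask set => data_len[i] can be inlined in the
+ *	// descriptor; otherwise it must be referenced by pointer
+ */
+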
+/**
+ * cnstr_shdsc_ipsec_new_encap - IPSec new mode ESP encapsulation
+ * protocol-level shared descriptor.
+ * @descbuf: pointer to buffer used for descriptor construction
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @pdb: pointer to the PDB to be used with this descriptor
+ * This structure will be copied inline to the descriptor under
+ * construction. No error checking will be made. Refer to the
+ * block guide for details about the encapsulation PDB.
+ * @opt_ip_hdr: pointer to Optional IP Header
+ * -if OIHI = PDBOPTS_ESP_OIHI_PDB_INL, opt_ip_hdr points to the buffer to
+ * be inlined in the PDB. Number of bytes (buffer size) copied is provided
+ * in pdb->ip_hdr_len.
+ * -if OIHI = PDBOPTS_ESP_OIHI_PDB_REF, opt_ip_hdr points to the address of
+ * the Optional IP Header. The address will be inlined in the PDB verbatim.
+ * -for other values of OIHI options field, opt_ip_hdr is not used.
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_PCL_IPSEC_*
+ * @authdata: pointer to authentication transform definitions.
+ * If an authentication key is required by the protocol, a "normal"
+ * key must be provided; DKP (Derived Key Protocol) will be used to
+ * compute MDHA on the fly in HW.
+ * Valid algorithm values - one of OP_PCL_IPSEC_*
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_ipsec_new_encap(uint32_t *descbuf, bool ps,
+ bool swap,
+ struct ipsec_encap_pdb *pdb,
+ uint8_t *opt_ip_hdr,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata)
+{
+ struct program prg;
+ struct program *p = &prg;
+
+ LABEL(keyjmp);
+ REFERENCE(pkeyjmp);
+ LABEL(hdr);
+ REFERENCE(phdr);
+
+ if (rta_sec_era < RTA_SEC_ERA_8) {
+ pr_err("IPsec new mode encap: available only for Era %d or above\n",
+ USER_SEC_ERA(RTA_SEC_ERA_8));
+ return -ENOTSUP;
+ }
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ phdr = SHR_HDR(p, SHR_SERIAL, hdr, 0);
+
+ __rta_copy_ipsec_encap_pdb(p, pdb, cipherdata->algtype);
+
+ switch (pdb->options & PDBOPTS_ESP_OIHI_MASK) {
+ case PDBOPTS_ESP_OIHI_PDB_INL:
+ COPY_DATA(p, opt_ip_hdr, pdb->ip_hdr_len);
+ break;
+ case PDBOPTS_ESP_OIHI_PDB_REF:
+ if (ps)
+ COPY_DATA(p, opt_ip_hdr, 8);
+ else
+ COPY_DATA(p, opt_ip_hdr, 4);
+ break;
+ default:
+ break;
+ }
+ SET_LABEL(p, hdr);
+
+ pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, SHRD);
+ if (authdata->keylen)
+ __gen_auth_key(p, authdata);
+ if (cipherdata->keylen)
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ SET_LABEL(p, keyjmp);
+ PROTOCOL(p, OP_TYPE_ENCAP_PROTOCOL,
+ OP_PCLID_IPSEC_NEW,
+ (uint16_t)(cipherdata->algtype | authdata->algtype));
+ PATCH_JUMP(p, pkeyjmp, keyjmp);
+ PATCH_HDR(p, phdr, hdr);
+ return PROGRAM_FINALIZE(p);
+}
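+
+/*
+ * Illustrative sketch (not part of the original header): a new-mode encap
+ * call that inlines a 20-byte outer IPv4 header in the PDB
+ * (OIHI = PDBOPTS_ESP_OIHI_PDB_INL). The outer_ip_hdr buffer, descbuf,
+ * swap_needed and the cipher/auth definitions are hypothetical placeholders.
+ *
+ *	struct ipsec_encap_pdb pdb;
+ *
+ *	memset(&pdb, 0, sizeof(pdb));
+ *	pdb.options = PDBOPTS_ESP_OIHI_PDB_INL | PDBOPTS_ESP_IVSRC;
+ *	pdb.spi = 0x1234;
+ *	pdb.ip_hdr_len = 20;		// bytes copied from opt_ip_hdr
+ *
+ *	return cnstr_shdsc_ipsec_new_encap(descbuf, false, swap_needed, &pdb,
+ *					   outer_ip_hdr, &cipher, &auth);
+ */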
+
+/**
+ * IPSEC_NEW_DEC_BASE_DESC_LEN - IPsec new mode decap shared descriptor length
+ *
+ * Accounts only for the "base" commands and is intended to be used by upper
+ * layers to determine whether keys can be inlined or not. To be used as first
+ * parameter of rta_inline_query().
+ */
+#define IPSEC_NEW_DEC_BASE_DESC_LEN (5 * CAAM_CMD_SZ + \
+ sizeof(struct ipsec_decap_pdb))
+
+/**
+ * IPSEC_NEW_NULL_DEC_BASE_DESC_LEN - IPsec new mode decap shared descriptor
+ * length for the case of
+ * NULL decryption / authentication
+ *
+ * Accounts only for the "base" commands and is intended to be used by upper
+ * layers to determine whether key can be inlined or not. To be used as first
+ * parameter of rta_inline_query().
+ */
+#define IPSEC_NEW_NULL_DEC_BASE_DESC_LEN (4 * CAAM_CMD_SZ + \
+ sizeof(struct ipsec_decap_pdb))
+
+/**
+ * cnstr_shdsc_ipsec_new_decap - IPSec new mode ESP decapsulation protocol-level
+ * shared descriptor.
+ * @descbuf: pointer to buffer used for descriptor construction
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @pdb: pointer to the PDB to be used with this descriptor
+ * This structure will be copied inline to the descriptor under
+ * construction. No error checking will be made. Refer to the
+ * block guide for details about the decapsulation PDB.
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_PCL_IPSEC_*
+ * @authdata: pointer to authentication transform definitions.
+ * If an authentication key is required by the protocol, a "normal"
+ * key must be provided; DKP (Derived Key Protocol) will be used to
+ * compute MDHA on the fly in HW.
+ * Valid algorithm values - one of OP_PCL_IPSEC_*
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_ipsec_new_decap(uint32_t *descbuf, bool ps,
+ bool swap,
+ struct ipsec_decap_pdb *pdb,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata)
+{
+ struct program prg;
+ struct program *p = &prg;
+
+ LABEL(keyjmp);
+ REFERENCE(pkeyjmp);
+ LABEL(hdr);
+ REFERENCE(phdr);
+
+ if (rta_sec_era < RTA_SEC_ERA_8) {
+ pr_err("IPsec new mode decap: available only for Era %d or above\n",
+ USER_SEC_ERA(RTA_SEC_ERA_8));
+ return -ENOTSUP;
+ }
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+ phdr = SHR_HDR(p, SHR_SERIAL, hdr, 0);
+ __rta_copy_ipsec_decap_pdb(p, pdb, cipherdata->algtype);
+ SET_LABEL(p, hdr);
+ pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, SHRD);
+ if (authdata->keylen)
+ __gen_auth_key(p, authdata);
+ if (cipherdata->keylen)
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ SET_LABEL(p, keyjmp);
+ PROTOCOL(p, OP_TYPE_DECAP_PROTOCOL,
+ OP_PCLID_IPSEC_NEW,
+ (uint16_t)(cipherdata->algtype | authdata->algtype));
+ PATCH_JUMP(p, pkeyjmp, keyjmp);
+ PATCH_HDR(p, phdr, hdr);
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * IPSEC_AUTH_VAR_BASE_DESC_LEN - IPsec encap/decap shared descriptor length
+ * for the case of variable-length authentication
+ * only data.
+ * Note: Only for SoCs with SEC_ERA >= 3.
+ *
+ * Accounts only for the "base" commands and is intended to be used by upper
+ * layers to determine whether keys can be inlined or not. To be used as first
+ * parameter of rta_inline_query().
+ */
+#define IPSEC_AUTH_VAR_BASE_DESC_LEN (27 * CAAM_CMD_SZ)
+
+/**
+ * IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN - IPsec AES decap shared descriptor
+ * length for variable-length authentication only
+ * data.
+ * Note: Only for SoCs with SEC_ERA >= 3.
+ *
+ * Accounts only for the "base" commands and is intended to be used by upper
+ * layers to determine whether key can be inlined or not. To be used as first
+ * parameter of rta_inline_query().
+ */
+#define IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN \
+ (IPSEC_AUTH_VAR_BASE_DESC_LEN + CAAM_CMD_SZ)
+
+/**
+ * IPSEC_AUTH_BASE_DESC_LEN - IPsec encap/decap shared descriptor length
+ *
+ * Accounts only for the "base" commands and is intended to be used by upper
+ * layers to determine whether key can be inlined or not. To be used as first
+ * parameter of rta_inline_query().
+ */
+#define IPSEC_AUTH_BASE_DESC_LEN (19 * CAAM_CMD_SZ)
+
+/**
+ * IPSEC_AUTH_AES_DEC_BASE_DESC_LEN - IPsec AES decap shared descriptor length
+ *
+ * Accounts only for the "base" commands and is intended to be used by upper
+ * layers to determine whether key can be inlined or not. To be used as first
+ * parameter of rta_inline_query().
+ */
+#define IPSEC_AUTH_AES_DEC_BASE_DESC_LEN (IPSEC_AUTH_BASE_DESC_LEN + \
+ CAAM_CMD_SZ)
+
+/**
+ * cnstr_shdsc_authenc - authenc-like descriptor
+ * @descbuf: pointer to buffer used for descriptor construction
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: if true, perform descriptor byte swapping on a 4-byte boundary
+ * @cipherdata: pointer to block cipher transform definitions.
+ * Valid algorithm values - one of OP_ALG_ALGSEL_* {DES, 3DES, AES}
+ * @authdata: pointer to authentication transform definitions.
+ * Valid algorithm values - one of OP_ALG_ALGSEL_* {MD5, SHA1,
+ * SHA224, SHA256, SHA384, SHA512}
+ * Note: The key for authentication is supposed to be given as plain text.
+ * Note: There's no support for keys longer than the block size of the
+ * underlying hash function, according to the selected algorithm.
+ *
+ * @ivlen: length of the IV to be read from the input frame, before any data
+ * to be processed
+ * @auth_only_len: length of the data to be authenticated-only (commonly IP
+ * header, IV, Sequence number and SPI)
+ * Note: Extended Sequence Number processing is NOT supported
+ *
+ * @trunc_len: the length of the ICV to be written to the output frame. If 0,
+ * then the corresponding length of the digest, according to the
+ * selected algorithm shall be used.
+ * @dir: Protocol direction, encapsulation or decapsulation (DIR_ENC/DIR_DEC)
+ *
+ * Note: Here's how the input frame needs to be formatted so that the processing
+ * will be done correctly:
+ * For encapsulation:
+ * Input:
+ * +----+----------------+---------------------------------------------+
+ * | IV | Auth-only data | Padded data to be authenticated & Encrypted |
+ * +----+----------------+---------------------------------------------+
+ * Output:
+ * +--------------------------------+-----+
+ * | Authenticated & Encrypted data | ICV |
+ * +--------------------------------+-----+
+ *
+ * For decapsulation:
+ * Input:
+ * +----+----------------+--------------------------------+-----+
+ * | IV | Auth-only data | Authenticated & Encrypted data | ICV |
+ * +----+----------------+--------------------------------+-----+
+ * Output:
+ * +--------------------------------+
+ * | Decrypted & authenticated data |
+ * +--------------------------------+
+ *
+ * Note: This descriptor can use per-packet commands, encoded as below in the
+ * DPOVRD register:
+ *	32     24     16              0
+ *	+------+------+---------------+
+ *	| 0x80 | 0x00 | auth_only_len |
+ *	+------+------+---------------+
+ *
+ * This mechanism is available only for SoCs having SEC ERA >= 3. In other
+ * words, this will not work for P4080TO2.
+ *
+ * Note: The descriptor does not add any kind of padding to the input data,
+ * so the upper layer needs to ensure that the data is padded properly,
+ * according to the selected cipher. Failure to do so will result in
+ * the descriptor failing with a data-size error.
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_authenc(uint32_t *descbuf, bool ps, bool swap,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata,
+ uint16_t ivlen, uint16_t auth_only_len,
+ uint8_t trunc_len, uint8_t dir)
+{
+ struct program prg;
+ struct program *p = &prg;
+ const bool is_aes_dec = (dir == DIR_DEC) &&
+ (cipherdata->algtype == OP_ALG_ALGSEL_AES);
+
+ LABEL(skip_patch_len);
+ LABEL(keyjmp);
+ LABEL(skipkeys);
+ LABEL(aonly_len_offset);
+ REFERENCE(pskip_patch_len);
+ REFERENCE(pkeyjmp);
+ REFERENCE(pskipkeys);
+ REFERENCE(read_len);
+ REFERENCE(write_len);
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+
+ /*
+ * Since we currently assume that key length is equal to hash digest
+ * size, it's ok to truncate keylen value.
+ */
+ trunc_len = trunc_len && (trunc_len < authdata->keylen) ?
+ trunc_len : (uint8_t)authdata->keylen;
+
+ SHR_HDR(p, SHR_SERIAL, 1, SC);
+
+ /*
+ * M0 will contain the value provided by the user when creating
+ * the shared descriptor. If the user provided an override in
+ * DPOVRD, then M0 will contain that value
+ */
+ MATHB(p, MATH0, ADD, auth_only_len, MATH0, 4, IMMED2);
+
+ if (rta_sec_era >= RTA_SEC_ERA_3) {
+ /*
+ * Check if the user wants to override the auth-only len
+ */
+ MATHB(p, DPOVRD, ADD, 0x80000000, MATH2, 4, IMMED2);
+
+ /*
+ * No need to patch the length of the auth-only data read if
+ * the user did not override it
+ */
+ pskip_patch_len = JUMP(p, skip_patch_len, LOCAL_JUMP, ALL_TRUE,
+ MATH_N);
+
+ /* Get auth-only len in M0 */
+ MATHB(p, MATH2, AND, 0xFFFF, MATH0, 4, IMMED2);
+
+ /*
+ * Since M0 is used in calculations, don't mangle it, copy
+ * its content to M1 and use this for patching.
+ */
+ MATHB(p, MATH0, ADD, MATH1, MATH1, 4, 0);
+
+ read_len = MOVE(p, DESCBUF, 0, MATH1, 0, 6, WAITCOMP | IMMED);
+ write_len = MOVE(p, MATH1, 0, DESCBUF, 0, 8, WAITCOMP | IMMED);
+
+ SET_LABEL(p, skip_patch_len);
+ }
+ /*
+ * MATH0 contains the value in DPOVRD w/o the MSB, or the initial
+ * value, as provided by the user at descriptor creation time
+ */
+ if (dir == DIR_ENC)
+ MATHB(p, MATH0, ADD, ivlen, MATH0, 4, IMMED2);
+ else
+ MATHB(p, MATH0, ADD, ivlen + trunc_len, MATH0, 4, IMMED2);
+
+ pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, SHRD);
+
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+
+ /* Insert Key */
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+
+ /* Do operation */
+ ALG_OPERATION(p, authdata->algtype, OP_ALG_AAI_HMAC,
+ OP_ALG_AS_INITFINAL,
+ dir == DIR_ENC ? ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
+ dir);
+
+ if (is_aes_dec)
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES, cipherdata->algmode,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, dir);
+ pskipkeys = JUMP(p, skipkeys, LOCAL_JUMP, ALL_TRUE, 0);
+
+ SET_LABEL(p, keyjmp);
+
+ ALG_OPERATION(p, authdata->algtype, OP_ALG_AAI_HMAC_PRECOMP,
+ OP_ALG_AS_INITFINAL,
+ dir == DIR_ENC ? ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
+ dir);
+
+ if (is_aes_dec) {
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES, cipherdata->algmode |
+ OP_ALG_AAI_DK, OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE, dir);
+ SET_LABEL(p, skipkeys);
+ } else {
+ SET_LABEL(p, skipkeys);
+ ALG_OPERATION(p, cipherdata->algtype, cipherdata->algmode,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, dir);
+ }
+
+ /*
+ * Prepare the length of the data to be both encrypted/decrypted
+ * and authenticated/checked
+ */
+ MATHB(p, SEQINSZ, SUB, MATH0, VSEQINSZ, 4, 0);
+
+ MATHB(p, VSEQINSZ, SUB, MATH3, VSEQOUTSZ, 4, 0);
+
+ /* Prepare for writing the output frame */
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ SET_LABEL(p, aonly_len_offset);
+
+ /* Read IV */
+ SEQLOAD(p, CONTEXT1, 0, ivlen, 0);
+
+ /*
+ * Read data needed only for authentication. This is overwritten above
+ * if the user requested it.
+ */
+ SEQFIFOLOAD(p, MSG2, auth_only_len, 0);
+
+ if (dir == DIR_ENC) {
+ /*
+ * Read input plaintext, encrypt and authenticate & write to
+ * output
+ */
+ SEQFIFOLOAD(p, MSGOUTSNOOP, 0, VLF | LAST1 | LAST2 | FLUSH1);
+
+ /* Finally, write the ICV */
+ SEQSTORE(p, CONTEXT2, 0, trunc_len, 0);
+ } else {
+ /*
+ * Read input ciphertext, decrypt and authenticate & write to
+ * output
+ */
+ SEQFIFOLOAD(p, MSGINSNOOP, 0, VLF | LAST1 | LAST2 | FLUSH1);
+
+ /* Read the ICV to check */
+ SEQFIFOLOAD(p, ICV2, trunc_len, LAST2);
+ }
+
+ PATCH_JUMP(p, pkeyjmp, keyjmp);
+ PATCH_JUMP(p, pskipkeys, skipkeys);
+ PATCH_JUMP(p, pskipkeys, skipkeys);
+
+ if (rta_sec_era >= RTA_SEC_ERA_3) {
+ PATCH_JUMP(p, pskip_patch_len, skip_patch_len);
+ PATCH_MOVE(p, read_len, aonly_len_offset);
+ PATCH_MOVE(p, write_len, aonly_len_offset);
+ }
+
+ return PROGRAM_FINALIZE(p);
+}
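+
+/*
+ * Illustrative sketch (not part of the original header): a hypothetical
+ * AES-CBC + HMAC-SHA1 authenc encapsulation set-up. Key material,
+ * swap_needed, the 16-byte IV length, the auth-only length and the 12-byte
+ * truncated ICV are placeholders chosen for the example only.
+ *
+ *	uint32_t descbuf[64];
+ *	struct alginfo cipher = { 0 }, auth = { 0 };
+ *
+ *	cipher.algtype = OP_ALG_ALGSEL_AES;
+ *	cipher.algmode = OP_ALG_AAI_CBC;
+ *	cipher.key = (uint64_t)(uintptr_t)aes_key;
+ *	cipher.keylen = 16;
+ *
+ *	auth.algtype = OP_ALG_ALGSEL_SHA1;
+ *	auth.key = (uint64_t)(uintptr_t)hmac_key;	// plain-text key
+ *	auth.keylen = 20;
+ *
+ *	return cnstr_shdsc_authenc(descbuf, false, swap_needed,
+ *				   &cipher, &auth, 16, auth_only_len,
+ *				   12, DIR_ENC);
+ *
+ * On SEC ERA >= 3 the auth-only length can later be overridden per packet by
+ * writing 0x80000000 | auth_only_len to DPOVRD, as described above.
+ */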
+
+#endif /* __DESC_IPSEC_H__ */
diff --git a/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta.h b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta.h
new file mode 100644
index 00000000..7cf4c8a9
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta.h
@@ -0,0 +1,954 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __RTA_RTA_H__
+#define __RTA_RTA_H__
+
+#include "rta/sec_run_time_asm.h"
+#include "rta/fifo_load_store_cmd.h"
+#include "rta/header_cmd.h"
+#include "rta/jump_cmd.h"
+#include "rta/key_cmd.h"
+#include "rta/load_cmd.h"
+#include "rta/math_cmd.h"
+#include "rta/move_cmd.h"
+#include "rta/nfifo_cmd.h"
+#include "rta/operation_cmd.h"
+#include "rta/protocol_cmd.h"
+#include "rta/seq_in_out_ptr_cmd.h"
+#include "rta/signature_cmd.h"
+#include "rta/store_cmd.h"
+
+/**
+ * DOC: About
+ *
+ * RTA (Runtime Assembler) Library is an easy and flexible runtime method for
+ * writing SEC descriptors. It implements a thin abstraction layer above
+ * SEC commands set; the resulting code is compact and similar to a
+ * descriptor sequence.
+ *
+ * RTA library improves comprehension of the SEC code, adds flexibility for
+ * writing complex descriptors and keeps the code lightweight. It should be
+ * used by anyone who needs to encode descriptors at runtime, with
+ * comprehensible flow control in the descriptor.
+ */
+
+/**
+ * DOC: Usage
+ *
+ * RTA is used in kernel space by the SEC / CAAM (Cryptographic Acceleration and
+ * Assurance Module) kernel module (drivers/crypto/caam) and SEC / CAAM QI
+ * kernel module (Freescale QorIQ SDK).
+ *
+ * RTA is used in user space by USDPAA - User Space DataPath Acceleration
+ * Architecture (Freescale QorIQ SDK).
+ */
+
+/**
+ * DOC: Descriptor Buffer Management Routines
+ *
+ * Contains details of RTA descriptor buffer management and SEC Era
+ * management routines.
+ */
+
+/**
+ * PROGRAM_CNTXT_INIT - must be called before any other descriptor run-time
+ * assembly call; the call type field carries info, i.e. whether the
+ * descriptor is a shared or a job descriptor.
+ * @program: pointer to struct program
+ * @buffer: input buffer where the descriptor will be placed (uint32_t *)
+ * @offset: offset in input buffer from where the data will be written
+ * (unsigned int)
+ */
+#define PROGRAM_CNTXT_INIT(program, buffer, offset) \
+ rta_program_cntxt_init(program, buffer, offset)
+
+/**
+ * PROGRAM_FINALIZE - must be called to mark completion of RTA call.
+ * @program: pointer to struct program
+ *
+ * Return: total size of the descriptor in words or negative number on error.
+ */
+#define PROGRAM_FINALIZE(program) rta_program_finalize(program)
+
+/**
+ * PROGRAM_SET_36BIT_ADDR - must be called to set pointer size to 36 bits
+ * @program: pointer to struct program
+ *
+ * Return: current size of the descriptor in words (unsigned int).
+ */
+#define PROGRAM_SET_36BIT_ADDR(program) rta_program_set_36bit_addr(program)
+
+/**
+ * PROGRAM_SET_BSWAP - must be called to enable byte swapping
+ * @program: pointer to struct program
+ *
+ * Byte swapping on a 4-byte boundary will be performed at the end - when
+ * calling PROGRAM_FINALIZE().
+ *
+ * Return: current size of the descriptor in words (unsigned int).
+ */
+#define PROGRAM_SET_BSWAP(program) rta_program_set_bswap(program)
+
+/**
+ * WORD - must be called to insert in descriptor buffer a 32bit value
+ * @program: pointer to struct program
+ * @val: input value to be written in descriptor buffer (uint32_t)
+ *
+ * Return: the descriptor buffer offset where this command is inserted
+ * (unsigned int).
+ */
+#define WORD(program, val) rta_word(program, val)
+
+/**
+ * DWORD - must be called to insert in descriptor buffer a 64bit value
+ * @program: pointer to struct program
+ * @val: input value to be written in descriptor buffer (uint64_t)
+ *
+ * Return: the descriptor buffer offset where this command is inserted
+ * (unsigned int).
+ */
+#define DWORD(program, val) rta_dword(program, val)
+
+/**
+ * COPY_DATA - must be called to insert in descriptor buffer data larger than
+ * 64bits.
+ * @program: pointer to struct program
+ * @data: input data to be written in descriptor buffer (uint8_t *)
+ * @len: length of input data (unsigned int)
+ *
+ * Return: the descriptor buffer offset where this command is inserted
+ * (unsigned int).
+ */
+#define COPY_DATA(program, data, len) rta_copy_data(program, (data), (len))
+
+/**
+ * DESC_LEN - determines job / shared descriptor buffer length (in words)
+ * @buffer: descriptor buffer (uint32_t *)
+ *
+ * Return: descriptor buffer length in words (unsigned int).
+ */
+#define DESC_LEN(buffer) rta_desc_len(buffer)
+
+/**
+ * DESC_BYTES - determines job / shared descriptor buffer length (in bytes)
+ * @buffer: descriptor buffer (uint32_t *)
+ *
+ * Return: descriptor buffer length in bytes (unsigned int).
+ */
+#define DESC_BYTES(buffer) rta_desc_bytes(buffer)
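+
+/*
+ * Illustrative sketch (not part of the original header): the typical life
+ * cycle of a descriptor buffer built with the routines above. The buffer
+ * size is a placeholder; the actual SEC commands are added with the wrapper
+ * routines documented further below.
+ *
+ *	uint32_t buf[64];
+ *	struct program prg;
+ *	struct program *p = &prg;
+ *	int len;
+ *
+ *	PROGRAM_CNTXT_INIT(p, buf, 0);
+ *	// ... SHR_HDR() / KEY() / PROTOCOL() / etc. calls go here ...
+ *	len = PROGRAM_FINALIZE(p);	// total size in words, or negative
+ *	// once buf holds a finalized descriptor (starting with a HEADER
+ *	// command), DESC_LEN(buf) and DESC_BYTES(buf) recover its length
+ */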
+
+/*
+ * SEC HW block revision.
+ *
+ * This *must not be confused with SEC version*:
+ * - SEC HW block revision format is "v"
+ * - SEC revision format is "x.y"
+ */
+extern enum rta_sec_era rta_sec_era;
+
+/**
+ * rta_set_sec_era - Set SEC Era HW block revision for which the RTA library
+ * will generate the descriptors.
+ * @era: SEC Era (enum rta_sec_era)
+ *
+ * Return: 0 if the ERA was set successfully, -1 otherwise (int)
+ *
+ * Warning 1: Must be called *only once*, *before* using any other RTA API
+ * routine.
+ *
+ * Warning 2: *Not thread safe*.
+ */
+static inline int
+rta_set_sec_era(enum rta_sec_era era)
+{
+ if (era > MAX_SEC_ERA) {
+ rta_sec_era = DEFAULT_SEC_ERA;
+ pr_err("Unsupported SEC ERA. Defaulting to ERA %d\n",
+ DEFAULT_SEC_ERA + 1);
+ return -1;
+ }
+
+ rta_sec_era = era;
+ return 0;
+}
+
+/**
+ * rta_get_sec_era - Get SEC Era HW block revision for which the RTA library
+ * will generate the descriptors.
+ *
+ * Return: SEC Era (unsigned int).
+ */
+static inline unsigned int
+rta_get_sec_era(void)
+{
+ return rta_sec_era;
+}
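+
+/*
+ * Illustrative sketch (not part of the original header): selecting the SEC
+ * Era once at start-up, before any descriptor is assembled. RTA_SEC_ERA_8 is
+ * only an example value; use the Era that matches the target SoC.
+ *
+ *	if (rta_set_sec_era(RTA_SEC_ERA_8)) {
+ *		// unsupported Era requested; RTA fell back to the default
+ *		return -1;
+ *	}
+ *	// rta_get_sec_era() now reports the Era used for code generation
+ */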
+
+/**
+ * DOC: SEC Commands Routines
+ *
+ * Contains details of RTA wrapper routines over SEC engine commands.
+ */
+
+/**
+ * SHR_HDR - Configures Shared Descriptor HEADER command
+ * @program: pointer to struct program
+ * @share: descriptor share state (enum rta_share_type)
+ * @start_idx: index in descriptor buffer where the execution of the shared
+ * descriptor should start (unsigned int).
+ * @flags: operational flags: RIF, DNR, CIF, SC, PD
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define SHR_HDR(program, share, start_idx, flags) \
+ rta_shr_header(program, share, start_idx, flags)
+
+/**
+ * JOB_HDR - Configures JOB Descriptor HEADER command
+ * @program: pointer to struct program
+ * @share: descriptor share state (enum rta_share_type)
+ * @start_idx: index in descriptor buffer where the execution of the job
+ * descriptor should start (unsigned int). In case SHR bit is present
+ * in flags, this will be the shared descriptor length.
+ * @share_desc: pointer to shared descriptor, in case SHR bit is set (uint64_t)
+ * @flags: operational flags: RSMS, DNR, TD, MTD, REO, SHR
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define JOB_HDR(program, share, start_idx, share_desc, flags) \
+ rta_job_header(program, share, start_idx, share_desc, flags, 0)
+
+/**
+ * JOB_HDR_EXT - Configures JOB Descriptor HEADER command
+ * @program: pointer to struct program
+ * @share: descriptor share state (enum rta_share_type)
+ * @start_idx: index in descriptor buffer where the execution of the job
+ * descriptor should start (unsigned int). In case SHR bit is present
+ * in flags, this will be the shared descriptor length.
+ * @share_desc: pointer to shared descriptor, in case SHR bit is set (uint64_t)
+ * @flags: operational flags: RSMS, DNR, TD, MTD, REO, SHR
+ * @ext_flags: extended header flags: DSV (DECO Select Valid), DECO Id (limited
+ * by DSEL_MASK).
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define JOB_HDR_EXT(program, share, start_idx, share_desc, flags, ext_flags) \
+ rta_job_header(program, share, start_idx, share_desc, flags | EXT, \
+ ext_flags)
+
+/**
+ * MOVE - Configures MOVE and MOVE_LEN commands
+ * @program: pointer to struct program
+ * @src: internal source of data that will be moved: CONTEXT1, CONTEXT2, OFIFO,
+ * DESCBUF, MATH0-MATH3, IFIFOABD, IFIFOAB1, IFIFOAB2, AB1, AB2, ABD.
+ * @src_offset: offset in source data (uint16_t)
+ * @dst: internal destination of data that will be moved: CONTEXT1, CONTEXT2,
+ * OFIFO, DESCBUF, MATH0-MATH3, IFIFOAB1, IFIFOAB2, IFIFO, PKA, KEY1,
+ * KEY2, ALTSOURCE.
+ * @dst_offset: offset in destination data (uint16_t)
+ * @length: size of data to be moved: for MOVE must be specified as immediate
+ * value and IMMED flag must be set; for MOVE_LEN must be specified
+ * using MATH0-MATH3.
+ * @opt: operational flags: WAITCOMP, FLUSH1, FLUSH2, LAST1, LAST2, SIZE_WORD,
+ * SIZE_BYTE, SIZE_DWORD, IMMED (not valid for MOVE_LEN).
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define MOVE(program, src, src_offset, dst, dst_offset, length, opt) \
+ rta_move(program, __MOVE, src, src_offset, dst, dst_offset, length, opt)
+
+/**
+ * MOVEB - Configures MOVEB command
+ * @program: pointer to struct program
+ * @src: internal source of data that will be moved: CONTEXT1, CONTEXT2, OFIFO,
+ * DESCBUF, MATH0-MATH3, IFIFOABD, IFIFOAB1, IFIFOAB2, AB1, AB2, ABD.
+ * @src_offset: offset in source data (uint16_t)
+ * @dst: internal destination of data that will be moved: CONTEXT1, CONTEXT2,
+ * OFIFO, DESCBUF, MATH0-MATH3, IFIFOAB1, IFIFOAB2, IFIFO, PKA, KEY1,
+ * KEY2, ALTSOURCE.
+ * @dst_offset: offset in destination data (uint16_t)
+ * @length: size of data to be moved: for MOVE must be specified as immediate
+ * value and IMMED flag must be set; for MOVE_LEN must be specified
+ * using MATH0-MATH3.
+ * @opt: operational flags: WAITCOMP, FLUSH1, FLUSH2, LAST1, LAST2, SIZE_WORD,
+ * SIZE_BYTE, SIZE_DWORD, IMMED (not valid for MOVE_LEN).
+ *
+ * Identical to the MOVE command if byte swapping is not enabled; otherwise,
+ * when the source/destination is the descriptor buffer or the MATH registers,
+ * the data type is a byte array where the MOVE data type is a 4-byte array,
+ * and vice versa.
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define MOVEB(program, src, src_offset, dst, dst_offset, length, opt) \
+ rta_move(program, __MOVEB, src, src_offset, dst, dst_offset, length, \
+ opt)
+
+/**
+ * MOVEDW - Configures MOVEDW command
+ * @program: pointer to struct program
+ * @src: internal source of data that will be moved: CONTEXT1, CONTEXT2, OFIFO,
+ * DESCBUF, MATH0-MATH3, IFIFOABD, IFIFOAB1, IFIFOAB2, AB1, AB2, ABD.
+ * @src_offset: offset in source data (uint16_t)
+ * @dst: internal destination of data that will be moved: CONTEXT1, CONTEXT2,
+ * OFIFO, DESCBUF, MATH0-MATH3, IFIFOAB1, IFIFOAB2, IFIFO, PKA, KEY1,
+ * KEY2, ALTSOURCE.
+ * @dst_offset: offset in destination data (uint16_t)
+ * @length: size of data to be moved: for MOVE must be specified as immediate
+ * value and IMMED flag must be set; for MOVE_LEN must be specified
+ * using MATH0-MATH3.
+ * @opt: operational flags: WAITCOMP, FLUSH1, FLUSH2, LAST1, LAST2, SIZE_WORD,
+ * SIZE_BYTE, SIZE_DWORD, IMMED (not valid for MOVE_LEN).
+ *
+ * Identical to the MOVE command, with the following differences: the data type
+ * is an 8-byte array, and word swapping is performed when the SEC is
+ * programmed in little endian mode.
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define MOVEDW(program, src, src_offset, dst, dst_offset, length, opt) \
+ rta_move(program, __MOVEDW, src, src_offset, dst, dst_offset, length, \
+ opt)
+
+/**
+ * FIFOLOAD - Configures FIFOLOAD command to load message data, PKHA data, IV,
+ * ICV, AAD and bit length message data into Input Data FIFO.
+ * @program: pointer to struct program
+ * @data: input data type to store: PKHA registers, IFIFO, MSG1, MSG2,
+ * MSGOUTSNOOP, MSGINSNOOP, IV1, IV2, AAD1, ICV1, ICV2, BIT_DATA, SKIP.
+ * @src: pointer or actual data in case of immediate load; IMMED, COPY and DCOPY
+ * flags indicate action taken (inline imm data, inline ptr, inline from
+ * ptr).
+ * @length: number of bytes to load (uint32_t)
+ * @flags: operational flags: SGF, IMMED, EXT, CLASS1, CLASS2, BOTH, FLUSH1,
+ * LAST1, LAST2, COPY, DCOPY.
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define FIFOLOAD(program, data, src, length, flags) \
+ rta_fifo_load(program, data, src, length, flags)
+
+/**
+ * SEQFIFOLOAD - Configures SEQ FIFOLOAD command to load message data, PKHA
+ * data, IV, ICV, AAD and bit length message data into Input Data
+ * FIFO.
+ * @program: pointer to struct program
+ * @data: input data type to store: PKHA registers, IFIFO, MSG1, MSG2,
+ * MSGOUTSNOOP, MSGINSNOOP, IV1, IV2, AAD1, ICV1, ICV2, BIT_DATA, SKIP.
+ * @length: number of bytes to load; can be set to 0 for SEQ command w/ VLF set
+ * (uint32_t).
+ * @flags: operational flags: VLF, CLASS1, CLASS2, BOTH, FLUSH1, LAST1, LAST2,
+ * AIDF.
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define SEQFIFOLOAD(program, data, length, flags) \
+ rta_fifo_load(program, data, NONE, length, flags|SEQ)
+
+/**
+ * FIFOSTORE - Configures FIFOSTORE command, to move data from Output Data FIFO
+ * to external memory via DMA.
+ * @program: pointer to struct program
+ * @data: output data type to store: PKHA registers, IFIFO, OFIFO, RNG,
+ * RNGOFIFO, AFHA_SBOX, MDHA_SPLIT_KEY, MSG, KEY1, KEY2, SKIP.
+ * @encrypt_flags: store data encryption mode: EKT, TK
+ * @dst: pointer to store location (uint64_t)
+ * @length: number of bytes to load (uint32_t)
+ * @flags: operational flags: SGF, CONT, EXT, CLASS1, CLASS2, BOTH
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define FIFOSTORE(program, data, encrypt_flags, dst, length, flags) \
+ rta_fifo_store(program, data, encrypt_flags, dst, length, flags)
+
+/**
+ * SEQFIFOSTORE - Configures SEQ FIFOSTORE command, to move data from Output
+ * Data FIFO to external memory via DMA.
+ * @program: pointer to struct program
+ * @data: output data type to store: PKHA registers, IFIFO, OFIFO, RNG,
+ * RNGOFIFO, AFHA_SBOX, MDHA_SPLIT_KEY, MSG, KEY1, KEY2, METADATA, SKIP.
+ * @encrypt_flags: store data encryption mode: EKT, TK
+ * @length: number of bytes to load; can be set to 0 for SEQ command w/ VLF set
+ * (uint32_t).
+ * @flags: operational flags: VLF, CONT, EXT, CLASS1, CLASS2, BOTH
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define SEQFIFOSTORE(program, data, encrypt_flags, length, flags) \
+ rta_fifo_store(program, data, encrypt_flags, 0, length, flags|SEQ)
+
+/**
+ * KEY - Configures KEY and SEQ KEY commands
+ * @program: pointer to struct program
+ * @key_dst: key store location: KEY1, KEY2, PKE, AFHA_SBOX, MDHA_SPLIT_KEY
+ * @encrypt_flags: key encryption mode: ENC, EKT, TK, NWB, PTS
+ * @src: pointer or actual data in case of immediate load (uint64_t); IMMED,
+ * COPY and DCOPY flags indicate action taken (inline imm data,
+ * inline ptr, inline from ptr).
+ * @length: number of bytes to load; can be set to 0 for SEQ command w/ VLF set
+ * (uint32_t).
+ * @flags: operational flags: for KEY: SGF, IMMED, COPY, DCOPY; for SEQKEY: SEQ,
+ * VLF, AIDF.
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define KEY(program, key_dst, encrypt_flags, src, length, flags) \
+ rta_key(program, key_dst, encrypt_flags, src, length, flags)
+
+/**
+ * SEQINPTR - Configures SEQ IN PTR command
+ * @program: pointer to struct program
+ * @src: starting address for Input Sequence (uint64_t)
+ * @length: number of bytes in (or to be added to) Input Sequence (uint32_t)
+ * @flags: operational flags: RBS, INL, SGF, PRE, EXT, RTO, RJD, SOP (when PRE,
+ * RTO or SOP are set, @src parameter must be 0).
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define SEQINPTR(program, src, length, flags) \
+ rta_seq_in_ptr(program, src, length, flags)
+
+/**
+ * SEQOUTPTR - Configures SEQ OUT PTR command
+ * @program: pointer to struct program
+ * @dst: starting address for Output Sequence (uint64_t)
+ * @length: number of bytes in (or to be added to) Output Sequence (uint32_t)
+ * @flags: operational flags: SGF, PRE, EXT, RTO, RST, EWS (when PRE or RTO are
+ * set, @dst parameter must be 0).
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define SEQOUTPTR(program, dst, length, flags) \
+ rta_seq_out_ptr(program, dst, length, flags)
+
+/**
+ * ALG_OPERATION - Configures ALGORITHM OPERATION command
+ * @program: pointer to struct program
+ * @cipher_alg: algorithm to be used
+ * @aai: Additional Algorithm Information; contains mode information that is
+ * associated with the algorithm (check desc.h for specific values).
+ * @algo_state: algorithm state; defines the state of the algorithm that is
+ * being executed (check desc.h file for specific values).
+ * @icv_check: ICV checking; selects whether the algorithm should check
+ * calculated ICV with known ICV: ICV_CHECK_ENABLE,
+ * ICV_CHECK_DISABLE.
+ * @enc: selects between encryption and decryption: DIR_ENC, DIR_DEC
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define ALG_OPERATION(program, cipher_alg, aai, algo_state, icv_check, enc) \
+ rta_operation(program, cipher_alg, aai, algo_state, icv_check, enc)
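For orientation, a minimal class 1 cipher body might combine the commands
documented here as sketched below (IV handling and the descriptor header are
omitted). This is illustrative only: "p" is an initialized struct program,
key_addr and keylen are placeholders, and the OP_ALG_* values are the AES-CBC
constants assumed to come from desc.h.

    KEY(p, KEY1, 0, key_addr, keylen, 0);
    ALG_OPERATION(p, OP_ALG_ALGSEL_AES, OP_ALG_AAI_CBC,
                  OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, DIR_ENC);
    SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1);
    SEQFIFOSTORE(p, MSG, 0, 0, VLF);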
+
+/**
+ * PROTOCOL - Configures PROTOCOL OPERATION command
+ * @program: pointer to struct program
+ * @optype: operation type: OP_TYPE_UNI_PROTOCOL / OP_TYPE_DECAP_PROTOCOL /
+ * OP_TYPE_ENCAP_PROTOCOL.
+ * @protid: protocol identifier value (check desc.h file for specific values)
+ * @protoinfo: protocol dependent value (check desc.h file for specific values)
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define PROTOCOL(program, optype, protid, protoinfo) \
+ rta_proto_operation(program, optype, protid, protoinfo)
+
+/**
+ * DKP_PROTOCOL - Configures DKP (Derived Key Protocol) PROTOCOL command
+ * @program: pointer to struct program
+ * @protid: protocol identifier value - one of the following:
+ * OP_PCLID_DKP_{MD5 | SHA1 | SHA224 | SHA256 | SHA384 | SHA512}
+ * @key_src: How the initial ("negotiated") key is provided to the DKP protocol.
+ * Valid values - one of OP_PCL_DKP_SRC_{IMM, SEQ, PTR, SGF}. Not all
+ * (key_src,key_dst) combinations are allowed.
+ * @key_dst: How the derived ("split") key is returned by the DKP protocol.
+ * Valid values - one of OP_PCL_DKP_DST_{IMM, SEQ, PTR, SGF}. Not all
+ * (key_src,key_dst) combinations are allowed.
+ * @keylen: length of the initial key, in bytes (uint16_t)
+ * @key: address where algorithm key resides; virtual address if key_type is
+ * RTA_DATA_IMM, physical (bus) address if key_type is RTA_DATA_PTR or
+ * RTA_DATA_IMM_DMA.
+ * @key_type: enum rta_data_type
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define DKP_PROTOCOL(program, protid, key_src, key_dst, keylen, key, key_type) \
+ rta_dkp_proto(program, protid, key_src, key_dst, keylen, key, key_type)
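A hypothetical example (key_buf is a placeholder virtual address holding a
32-byte key that is inlined in the descriptor; with OP_PCL_DKP_DST_IMM the
derived split key is returned as immediate data as well):

    DKP_PROTOCOL(p, OP_PCLID_DKP_SHA256, OP_PCL_DKP_SRC_IMM,
                 OP_PCL_DKP_DST_IMM, 32, key_buf, RTA_DATA_IMM);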
+
+/**
+ * PKHA_OPERATION - Configures PKHA OPERATION command
+ * @program: pointer to struct program
+ * @op_pkha: PKHA operation; indicates the modular arithmetic function to
+ * execute (check desc.h file for specific values).
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define PKHA_OPERATION(program, op_pkha) rta_pkha_operation(program, op_pkha)
+
+/**
+ * JUMP - Configures JUMP command
+ * @program: pointer to struct program
+ * @addr: local offset for local jumps or address pointer for non-local jumps;
+ * IMM or PTR macros must be used to indicate type.
+ * @jump_type: type of action taken by jump (enum rta_jump_type)
+ * @test_type: defines how jump conditions are evaluated (enum rta_jump_cond)
+ * @cond: jump conditions: operational flags - DONE1, DONE2, BOTH; various
+ *          sharing and wait conditions (JSL = 1) - NIFP, NIP, NOP, NCP, CALM,
+ *          SELF, SHRD, JQP; Math and PKHA status conditions (JSL = 0) -
+ *          MATH_Z, MATH_N, MATH_NV, MATH_C, PK_0, PK_GCD_1, PK_PRIME.
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define JUMP(program, addr, jump_type, test_type, cond) \
+ rta_jump(program, addr, jump_type, test_type, cond, NONE)
+
+/**
+ * JUMP_INC - Configures JUMP_INC command
+ * @program: pointer to struct program
+ * @addr: local offset; IMM or PTR macros must be used to indicate type
+ * @test_type: defines how jump conditions are evaluated (enum rta_jump_cond)
+ * @cond: jump conditions: Math status conditions (JSL = 0): MATH_Z, MATH_N,
+ *        MATH_NV, MATH_C
+ * @src_dst: register to increment / decrement: MATH0-MATH3, DPOVRD, SEQINSZ,
+ * SEQOUTSZ, VSEQINSZ, VSEQOUTSZ.
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define JUMP_INC(program, addr, test_type, cond, src_dst) \
+ rta_jump(program, addr, LOCAL_JUMP_INC, test_type, cond, src_dst)
+
+/**
+ * JUMP_DEC - Configures JUMP_DEC command
+ * @program: pointer to struct program
+ * @addr: local offset; IMM or PTR macros must be used to indicate type
+ * @test_type: defines how jump conditions are evaluated (enum rta_jump_cond)
+ * @cond: jump conditions: Math status conditions (JSL = 0): MATH_Z, MATH_N,
+ *        MATH_NV, MATH_C
+ * @src_dst: register to increment / decrement: MATH0-MATH3, DPOVRD, SEQINSZ,
+ * SEQOUTSZ, VSEQINSZ, VSEQOUTSZ.
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define JUMP_DEC(program, addr, test_type, cond, src_dst) \
+ rta_jump(program, addr, LOCAL_JUMP_DEC, test_type, cond, src_dst)
+
+/**
+ * LOAD - Configures LOAD command to load data registers from descriptor or from
+ * a memory location.
+ * @program: pointer to struct program
+ * @addr: immediate value or pointer to the data to be loaded; IMMED, COPY and
+ * DCOPY flags indicate action taken (inline imm data, inline ptr, inline
+ * from ptr).
+ * @dst: destination register (uint64_t)
+ * @offset: start point to write data in destination register (uint32_t)
+ * @length: number of bytes to load (uint32_t)
+ * @flags: operational flags: VLF, IMMED, COPY, DCOPY
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define LOAD(program, addr, dst, offset, length, flags) \
+ rta_load(program, addr, dst, offset, length, flags)
+
+/**
+ * SEQLOAD - Configures SEQ LOAD command to load data registers from descriptor
+ * or from a memory location.
+ * @program: pointer to struct program
+ * @dst: destination register (uint64_t)
+ * @offset: start point to write data in destination register (uint32_t)
+ * @length: number of bytes to load (uint32_t)
+ * @flags: operational flags: SGF
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define SEQLOAD(program, dst, offset, length, flags) \
+ rta_load(program, NONE, dst, offset, length, flags|SEQ)
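A common SEQLOAD use, sketched for illustration (ivlen is a placeholder, and
CONTEXT1 as destination assumes the LOAD destination table defined later in
this directory): the IV for the current job is read from the input sequence
into the start of the Class 1 context register.

    SEQLOAD(p, CONTEXT1, 0, ivlen, 0);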
+
+/**
+ * STORE - Configures STORE command to read data from registers and write them
+ * to a memory location.
+ * @program: pointer to struct program
+ * @src: immediate value or source register for data to be stored: KEY1SZ,
+ * KEY2SZ, DJQDA, MODE1, MODE2, DJQCTRL, DATA1SZ, DATA2SZ, DSTAT, ICV1SZ,
+ * ICV2SZ, DPID, CCTRL, ICTRL, CLRW, CSTAT, MATH0-MATH3, PKHA registers,
+ * CONTEXT1, CONTEXT2, DESCBUF, JOBDESCBUF, SHAREDESCBUF. In case of
+ * immediate value, IMMED, COPY and DCOPY flags indicate action taken
+ * (inline imm data, inline ptr, inline from ptr).
+ * @offset: start point for reading from source register (uint16_t)
+ * @dst: pointer to store location (uint64_t)
+ * @length: number of bytes to store (uint32_t)
+ * @flags: operational flags: VLF, IMMED, COPY, DCOPY
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define STORE(program, src, offset, dst, length, flags) \
+ rta_store(program, src, offset, dst, length, flags)
+
+/**
+ * SEQSTORE - Configures SEQ STORE command to read data from registers and write
+ * them to a memory location.
+ * @program: pointer to struct program
+ * @src: immediate value or source register for data to be stored: KEY1SZ,
+ * KEY2SZ, DJQDA, MODE1, MODE2, DJQCTRL, DATA1SZ, DATA2SZ, DSTAT, ICV1SZ,
+ * ICV2SZ, DPID, CCTRL, ICTRL, CLRW, CSTAT, MATH0-MATH3, PKHA registers,
+ * CONTEXT1, CONTEXT2, DESCBUF, JOBDESCBUF, SHAREDESCBUF. In case of
+ * immediate value, IMMED, COPY and DCOPY flags indicate action taken
+ * (inline imm data, inline ptr, inline from ptr).
+ * @offset: start point for reading from source register (uint16_t)
+ * @length: number of bytes to store (uint32_t)
+ * @flags: operational flags: SGF, IMMED, COPY, DCOPY
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define SEQSTORE(program, src, offset, length, flags) \
+ rta_store(program, src, offset, NONE, length, flags|SEQ)
+
+/**
+ * MATHB - Configures MATHB command to perform binary operations
+ * @program: pointer to struct program
+ * @operand1: first operand: MATH0-MATH3, DPOVRD, SEQINSZ, SEQOUTSZ, VSEQINSZ,
+ * VSEQOUTSZ, ZERO, ONE, NONE, Immediate value. IMMED must be used to
+ * indicate immediate value.
+ * @operator: function to be performed: ADD, ADDC, SUB, SUBB, OR, AND, XOR,
+ * LSHIFT, RSHIFT, SHLD.
+ * @operand2: second operand: MATH0-MATH3, DPOVRD, VSEQINSZ, VSEQOUTSZ, ABD,
+ * OFIFO, JOBSRC, ZERO, ONE, Immediate value. IMMED2 must be used to
+ * indicate immediate value.
+ * @result: destination for the result: MATH0-MATH3, DPOVRD, SEQINSZ, SEQOUTSZ,
+ * NONE, VSEQINSZ, VSEQOUTSZ.
+ * @length: length in bytes of the operation and the immediate value, if there
+ * is one (int).
+ * @opt: operational flags: IFB, NFU, STL, SWP, IMMED, IMMED2
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define MATHB(program, operand1, operator, operand2, result, length, opt) \
+ rta_math(program, operand1, MATH_FUN_##operator, operand2, result, \
+ length, opt)
+
+/**
+ * MATHI - Configures MATHI command to perform binary operations
+ * @program: pointer to struct program
+ * @operand: if !SSEL: MATH0-MATH3, DPOVRD, SEQINSZ, SEQOUTSZ, VSEQINSZ,
+ * VSEQOUTSZ, ZERO, ONE.
+ * if SSEL: MATH0-MATH3, DPOVRD, VSEQINSZ, VSEQOUTSZ, ABD, OFIFO,
+ * JOBSRC, ZERO, ONE.
+ * @operator: function to be performed: ADD, ADDC, SUB, SUBB, OR, AND, XOR,
+ * LSHIFT, RSHIFT, FBYT (for !SSEL only).
+ * @imm: Immediate value (uint8_t). IMMED must be used to indicate immediate
+ * value.
+ * @result: destination for the result: MATH0-MATH3, DPOVRD, SEQINSZ, SEQOUTSZ,
+ * NONE, VSEQINSZ, VSEQOUTSZ.
+ * @length: length in bytes of the operation and the immediate value, if there
+ * is one (int). @imm is left-extended with zeros if needed.
+ * @opt: operational flags: NFU, SSEL, SWP, IMMED
+ *
+ * If !SSEL, @operand <@operator> @imm -> @result
+ * If SSEL, @imm <@operator> @operand -> @result
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define MATHI(program, operand, operator, imm, result, length, opt) \
+ rta_mathi(program, operand, MATH_FUN_##operator, imm, result, length, \
+ opt)
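A short sketch of the operand ordering (illustrative only; "p" is an
initialized struct program, and MATHI is only usable on SEC Eras that
implement it):

    /* SSEL not set: VSEQOUTSZ = SEQINSZ - 4; the 1-byte immediate is
     * left-extended with zeros to the 4-byte operation length. */
    MATHI(p, SEQINSZ, SUB, 4, VSEQOUTSZ, 4, IMMED);

    /* The equivalent MATHB form passes the immediate as operand2. */
    MATHB(p, SEQINSZ, SUB, 4, VSEQOUTSZ, 4, IMMED2);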
+
+/**
+ * MATHU - Configures MATHU command to perform unary operations
+ * @program: pointer to struct program
+ * @operand1: operand: MATH0-MATH3, DPOVRD, SEQINSZ, SEQOUTSZ, VSEQINSZ,
+ * VSEQOUTSZ, ZERO, ONE, NONE, Immediate value. IMMED must be used to
+ * indicate immediate value.
+ * @operator: function to be performed: ZBYT, BSWAP
+ * @result: destination for the result: MATH0-MATH3, DPOVRD, SEQINSZ, SEQOUTSZ,
+ * NONE, VSEQINSZ, VSEQOUTSZ.
+ * @length: length in bytes of the operation and the immediate value, if there
+ * is one (int).
+ * @opt: operational flags: NFU, STL, SWP, IMMED
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define MATHU(program, operand1, operator, result, length, opt) \
+ rta_math(program, operand1, MATH_FUN_##operator, NONE, result, length, \
+ opt)
+
+/**
+ * SIGNATURE - Configures SIGNATURE command
+ * @program: pointer to struct program
+ * @sign_type: signature type: SIGN_TYPE_FINAL, SIGN_TYPE_FINAL_RESTORE,
+ * SIGN_TYPE_FINAL_NONZERO, SIGN_TYPE_IMM_2, SIGN_TYPE_IMM_3,
+ * SIGN_TYPE_IMM_4.
+ *
+ * After SIGNATURE command, DWORD or WORD must be used to insert signature in
+ * descriptor buffer.
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define SIGNATURE(program, sign_type) rta_signature(program, sign_type)
+
+/**
+ * NFIFOADD - Configures NFIFO command, a shortcut of RTA Load command to write
+ * to iNfo FIFO.
+ * @program: pointer to struct program
+ * @src: source for the input data in Alignment Block: IFIFO, OFIFO, PAD,
+ * MSGOUTSNOOP, ALTSOURCE, OFIFO_SYNC, MSGOUTSNOOP_ALT.
+ * @data: type of data that is going through the Input Data FIFO: MSG, MSG1,
+ * MSG2, IV1, IV2, ICV1, ICV2, SAD1, AAD1, AAD2, AFHA_SBOX, SKIP,
+ * PKHA registers, AB1, AB2, ABD.
+ * @length: length of the data copied in FIFO registers (uint32_t)
+ * @flags: select options from:
+ *         -operational flags: LAST1, LAST2, FLUSH1, FLUSH2, OC, BP
+ *         -when PAD is selected as source: BM, PR, PS
+ *         -padding type: PAD_ZERO, PAD_NONZERO, PAD_INCREMENT, PAD_RANDOM,
+ *          PAD_ZERO_N1, PAD_NONZERO_0, PAD_N1, PAD_NONZERO_N
+ *
+ * Return: On success, descriptor buffer offset where this command is inserted.
+ * On error, a negative error code; first error program counter will
+ * point to offset in descriptor buffer where the instruction should
+ * have been written.
+ */
+#define NFIFOADD(program, src, data, length, flags) \
+ rta_nfifo_load(program, src, data, length, flags)
+
+/**
+ * DOC: Self Referential Code Management Routines
+ *
+ * Contains details of RTA self referential code routines.
+ */
+
+/**
+ * REFERENCE - initialize a variable used for storing an index inside a
+ * descriptor buffer.
+ * @ref: reference to a descriptor buffer's index where an update is required
+ *       with a value that will be known later in the program flow.
+ */
+#define REFERENCE(ref) int ref = -1
+
+/**
+ * LABEL - initialize a variable used for storing an index inside a descriptor
+ * buffer.
+ * @label: stores the value with which the REFERENCE line in the descriptor
+ *         buffer should be updated.
+ */
+#define LABEL(label) unsigned int label = 0
+
+/**
+ * SET_LABEL - set a LABEL value
+ * @program: pointer to struct program
+ * @label: value that will be inserted in a line previously written in the
+ * descriptor buffer.
+ */
+#define SET_LABEL(program, label) (label = rta_set_label(program))
+
+/**
+ * PATCH_JUMP - Auxiliary command to resolve self referential code
+ * @program: buffer to be updated (struct program *)
+ * @line: position in descriptor buffer where the update will be done; this
+ * value is previously retained in program flow using a reference near
+ * the sequence to be modified.
+ * @new_ref: updated value that will be inserted in descriptor buffer at the
+ * specified line; this value is previously obtained using SET_LABEL
+ * macro near the line that will be used as reference (unsigned int).
+ * For JUMP command, the value represents the offset field (in words).
+ *
+ * Return: 0 in case of success, a negative error code if it fails
+ */
+#define PATCH_JUMP(program, line, new_ref) rta_patch_jmp(program, line, new_ref)
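The routines above are typically used together as sketched below (illustrative
only; key_addr and keylen are placeholders): a forward jump is emitted before
its target offset is known, the target line is labelled once it is written,
and the jump is patched afterwards.

    LABEL(keyjmp);
    REFERENCE(pkeyjmp);

    pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, SHRD);
    KEY(p, KEY1, 0, key_addr, keylen, 0);
    SET_LABEL(p, keyjmp);
    /* ... remaining descriptor commands ... */
    PATCH_JUMP(p, pkeyjmp, keyjmp);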
+
+/**
+ * PATCH_MOVE - Auxiliary command to resolve self referential code
+ * @program: buffer to be updated (struct program *)
+ * @line: position in descriptor buffer where the update will be done; this
+ * value is previously retained in program flow using a reference near
+ * the sequence to be modified.
+ * @new_ref: updated value that will be inserted in descriptor buffer at the
+ * specified line; this value is previously obtained using SET_LABEL
+ * macro near the line that will be used as reference (unsigned int).
+ * For MOVE command, the value represents the offset field (in words).
+ *
+ * Return: 0 in case of success, a negative error code if it fails
+ */
+#define PATCH_MOVE(program, line, new_ref) \
+ rta_patch_move(program, line, new_ref)
+
+/**
+ * PATCH_LOAD - Auxiliary command to resolve self referential code
+ * @program: buffer to be updated (struct program *)
+ * @line: position in descriptor buffer where the update will be done; this
+ * value is previously retained in program flow using a reference near
+ * the sequence to be modified.
+ * @new_ref: updated value that will be inserted in descriptor buffer at the
+ * specified line; this value is previously obtained using SET_LABEL
+ * macro near the line that will be used as reference (unsigned int).
+ * For LOAD command, the value represents the offset field (in words).
+ *
+ * Return: 0 in case of success, a negative error code if it fails
+ */
+#define PATCH_LOAD(program, line, new_ref) \
+ rta_patch_load(program, line, new_ref)
+
+/**
+ * PATCH_STORE - Auxiliary command to resolve self referential code
+ * @program: buffer to be updated (struct program *)
+ * @line: position in descriptor buffer where the update will be done; this
+ * value is previously retained in program flow using a reference near
+ * the sequence to be modified.
+ * @new_ref: updated value that will be inserted in descriptor buffer at the
+ * specified line; this value is previously obtained using SET_LABEL
+ * macro near the line that will be used as reference (unsigned int).
+ * For STORE command, the value represents the offset field (in words).
+ *
+ * Return: 0 in case of success, a negative error code if it fails
+ */
+#define PATCH_STORE(program, line, new_ref) \
+ rta_patch_store(program, line, new_ref)
+
+/**
+ * PATCH_HDR - Auxiliary command to resolve self referential code
+ * @program: buffer to be updated (struct program *)
+ * @line: position in descriptor buffer where the update will be done; this
+ * value is previously retained in program flow using a reference near
+ * the sequence to be modified.
+ * @new_ref: updated value that will be inserted in descriptor buffer at the
+ * specified line; this value is previously obtained using SET_LABEL
+ * macro near the line that will be used as reference (unsigned int).
+ * For HEADER command, the value represents the start index field.
+ *
+ * Return: 0 in case of success, a negative error code if it fails
+ */
+#define PATCH_HDR(program, line, new_ref) \
+ rta_patch_header(program, line, new_ref)
+
+/**
+ * PATCH_RAW - Auxiliary command to resolve self referential code
+ * @program: buffer to be updated (struct program *)
+ * @line: position in descriptor buffer where the update will be done; this
+ * value is previously retained in program flow using a reference near
+ * the sequence to be modified.
+ * @mask: mask to be used for applying the new value (unsigned int). The mask
+ * selects which bits from the provided @new_val are taken into
+ * consideration when overwriting the existing value.
+ * @new_val: updated value that will be masked using the provided mask value
+ * and inserted in descriptor buffer at the specified line.
+ *
+ * Return: 0 in case of success, a negative error code if it fails
+ */
+#define PATCH_RAW(program, line, mask, new_val) \
+ rta_patch_raw(program, line, mask, new_val)
+
+#endif /* __RTA_RTA_H__ */
diff --git a/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/fifo_load_store_cmd.h b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/fifo_load_store_cmd.h
new file mode 100644
index 00000000..79a48da2
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/fifo_load_store_cmd.h
@@ -0,0 +1,346 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __RTA_FIFO_LOAD_STORE_CMD_H__
+#define __RTA_FIFO_LOAD_STORE_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+static const uint32_t fifo_load_table[][2] = {
+/*1*/ { PKA0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_A0 },
+ { PKA1, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_A1 },
+ { PKA2, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_A2 },
+ { PKA3, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_A3 },
+ { PKB0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_B0 },
+ { PKB1, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_B1 },
+ { PKB2, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_B2 },
+ { PKB3, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_B3 },
+ { PKA, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_A },
+ { PKB, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_B },
+ { PKN, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_PK_N },
+ { SKIP, FIFOLD_CLASS_SKIP },
+ { MSG1, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG },
+ { MSG2, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG },
+ { MSGOUTSNOOP, FIFOLD_CLASS_BOTH | FIFOLD_TYPE_MSG1OUT2 },
+ { MSGINSNOOP, FIFOLD_CLASS_BOTH | FIFOLD_TYPE_MSG },
+ { IV1, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV },
+ { IV2, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_IV },
+ { AAD1, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_AAD },
+ { ICV1, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_ICV },
+ { ICV2, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_ICV },
+ { BIT_DATA, FIFOLD_TYPE_BITDATA },
+/*23*/ { IFIFO, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_NOINFOFIFO }
+};
+
+/*
+ * Allowed FIFO_LOAD input data types for each SEC Era.
+ * Values represent the number of entries from fifo_load_table[] that are
+ * supported.
+ */
+static const unsigned int fifo_load_table_sz[] = {22, 22, 23, 23,
+ 23, 23, 23, 23};
+
+static inline int
+rta_fifo_load(struct program *program, uint32_t src,
+ uint64_t loc, uint32_t length, uint32_t flags)
+{
+ uint32_t opcode = 0;
+ uint32_t ext_length = 0, val = 0;
+ int ret = -EINVAL;
+ bool is_seq_cmd = false;
+ unsigned int start_pc = program->current_pc;
+
+ /* write command type field */
+ if (flags & SEQ) {
+ opcode = CMD_SEQ_FIFO_LOAD;
+ is_seq_cmd = true;
+ } else {
+ opcode = CMD_FIFO_LOAD;
+ }
+
+ /* Parameters checking */
+ if (is_seq_cmd) {
+ if ((flags & IMMED) || (flags & SGF)) {
+ pr_err("SEQ FIFO LOAD: Invalid command\n");
+ goto err;
+ }
+ if ((rta_sec_era <= RTA_SEC_ERA_5) && (flags & AIDF)) {
+ pr_err("SEQ FIFO LOAD: Flag(s) not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+ if ((flags & VLF) && ((flags & EXT) || (length >> 16))) {
+ pr_err("SEQ FIFO LOAD: Invalid usage of VLF\n");
+ goto err;
+ }
+ } else {
+ if (src == SKIP) {
+ pr_err("FIFO LOAD: Invalid src\n");
+ goto err;
+ }
+ if ((flags & AIDF) || (flags & VLF)) {
+ pr_err("FIFO LOAD: Invalid command\n");
+ goto err;
+ }
+ if ((flags & IMMED) && (flags & SGF)) {
+ pr_err("FIFO LOAD: Invalid usage of SGF and IMM\n");
+ goto err;
+ }
+ if ((flags & IMMED) && ((flags & EXT) || (length >> 16))) {
+ pr_err("FIFO LOAD: Invalid usage of EXT and IMM\n");
+ goto err;
+ }
+ }
+
+ /* write input data type field */
+ ret = __rta_map_opcode(src, fifo_load_table,
+ fifo_load_table_sz[rta_sec_era], &val);
+ if (ret < 0) {
+ pr_err("FIFO LOAD: Source value is not supported. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+ opcode |= val;
+
+ if (flags & CLASS1)
+ opcode |= FIFOLD_CLASS_CLASS1;
+ if (flags & CLASS2)
+ opcode |= FIFOLD_CLASS_CLASS2;
+ if (flags & BOTH)
+ opcode |= FIFOLD_CLASS_BOTH;
+
+ /* write fields: SGF|VLF, IMM, [LC1, LC2, F1] */
+ if (flags & FLUSH1)
+ opcode |= FIFOLD_TYPE_FLUSH1;
+ if (flags & LAST1)
+ opcode |= FIFOLD_TYPE_LAST1;
+ if (flags & LAST2)
+ opcode |= FIFOLD_TYPE_LAST2;
+ if (!is_seq_cmd) {
+ if (flags & SGF)
+ opcode |= FIFOLDST_SGF;
+ if (flags & IMMED)
+ opcode |= FIFOLD_IMM;
+ } else {
+ if (flags & VLF)
+ opcode |= FIFOLDST_VLF;
+ if (flags & AIDF)
+ opcode |= FIFOLD_AIDF;
+ }
+
+ /*
+ * Verify if extended length is required. In case of BITDATA, calculate
+ * number of full bytes and additional valid bits.
+ */
+ if ((flags & EXT) || (length >> 16)) {
+ opcode |= FIFOLDST_EXT;
+ if (src == BIT_DATA) {
+ ext_length = (length / 8);
+ length = (length % 8);
+ } else {
+ ext_length = length;
+ length = 0;
+ }
+ }
+ opcode |= (uint16_t) length;
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ /* write pointer or immediate data field */
+ if (flags & IMMED)
+ __rta_inline_data(program, loc, flags & __COPY_MASK, length);
+ else if (!is_seq_cmd)
+ __rta_out64(program, program->ps, loc);
+
+ /* write extended length field */
+ if (opcode & FIFOLDST_EXT)
+ __rta_out32(program, ext_length);
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
+
+static const uint32_t fifo_store_table[][2] = {
+/*1*/ { PKA0, FIFOST_TYPE_PKHA_A0 },
+ { PKA1, FIFOST_TYPE_PKHA_A1 },
+ { PKA2, FIFOST_TYPE_PKHA_A2 },
+ { PKA3, FIFOST_TYPE_PKHA_A3 },
+ { PKB0, FIFOST_TYPE_PKHA_B0 },
+ { PKB1, FIFOST_TYPE_PKHA_B1 },
+ { PKB2, FIFOST_TYPE_PKHA_B2 },
+ { PKB3, FIFOST_TYPE_PKHA_B3 },
+ { PKA, FIFOST_TYPE_PKHA_A },
+ { PKB, FIFOST_TYPE_PKHA_B },
+ { PKN, FIFOST_TYPE_PKHA_N },
+ { PKE, FIFOST_TYPE_PKHA_E_JKEK },
+ { RNG, FIFOST_TYPE_RNGSTORE },
+ { RNGOFIFO, FIFOST_TYPE_RNGFIFO },
+ { AFHA_SBOX, FIFOST_TYPE_AF_SBOX_JKEK },
+ { MDHA_SPLIT_KEY, FIFOST_CLASS_CLASS2KEY | FIFOST_TYPE_SPLIT_KEK },
+ { MSG, FIFOST_TYPE_MESSAGE_DATA },
+ { KEY1, FIFOST_CLASS_CLASS1KEY | FIFOST_TYPE_KEY_KEK },
+ { KEY2, FIFOST_CLASS_CLASS2KEY | FIFOST_TYPE_KEY_KEK },
+ { OFIFO, FIFOST_TYPE_OUTFIFO_KEK},
+ { SKIP, FIFOST_TYPE_SKIP },
+/*22*/ { METADATA, FIFOST_TYPE_METADATA},
+ { MSG_CKSUM, FIFOST_TYPE_MESSAGE_DATA2 }
+};
+
+/*
+ * Allowed FIFO_STORE output data types for each SEC Era.
+ * Values represent the number of entries from fifo_store_table[] that are
+ * supported.
+ */
+static const unsigned int fifo_store_table_sz[] = {21, 21, 21, 21,
+ 22, 22, 22, 23};
+
+static inline int
+rta_fifo_store(struct program *program, uint32_t src,
+ uint32_t encrypt_flags, uint64_t dst,
+ uint32_t length, uint32_t flags)
+{
+ uint32_t opcode = 0;
+ uint32_t val = 0;
+ int ret = -EINVAL;
+ bool is_seq_cmd = false;
+ unsigned int start_pc = program->current_pc;
+
+ /* write command type field */
+ if (flags & SEQ) {
+ opcode = CMD_SEQ_FIFO_STORE;
+ is_seq_cmd = true;
+ } else {
+ opcode = CMD_FIFO_STORE;
+ }
+
+ /* Parameter checking */
+ if (is_seq_cmd) {
+ if ((flags & VLF) && ((length >> 16) || (flags & EXT))) {
+ pr_err("SEQ FIFO STORE: Invalid usage of VLF\n");
+ goto err;
+ }
+ if (dst) {
+ pr_err("SEQ FIFO STORE: Invalid command\n");
+ goto err;
+ }
+ if ((src == METADATA) && (flags & (CONT | EXT))) {
+ pr_err("SEQ FIFO STORE: Invalid flags\n");
+ goto err;
+ }
+ } else {
+ if (((src == RNGOFIFO) && ((dst) || (flags & EXT))) ||
+ (src == METADATA)) {
+ pr_err("FIFO STORE: Invalid destination\n");
+ goto err;
+ }
+ }
+ if ((rta_sec_era == RTA_SEC_ERA_7) && (src == AFHA_SBOX)) {
+ pr_err("FIFO STORE: AFHA S-box not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+
+ /* write output data type field */
+ ret = __rta_map_opcode(src, fifo_store_table,
+ fifo_store_table_sz[rta_sec_era], &val);
+ if (ret < 0) {
+ pr_err("FIFO STORE: Source type not supported. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+ opcode |= val;
+
+ if (encrypt_flags & TK)
+ opcode |= (0x1 << FIFOST_TYPE_SHIFT);
+ if (encrypt_flags & EKT) {
+ if (rta_sec_era == RTA_SEC_ERA_1) {
+ pr_err("FIFO STORE: AES-CCM source types not supported\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ opcode |= (0x10 << FIFOST_TYPE_SHIFT);
+ opcode &= (uint32_t)~(0x20 << FIFOST_TYPE_SHIFT);
+ }
+
+ /* write flags fields */
+ if (flags & CONT)
+ opcode |= FIFOST_CONT;
+ if ((flags & VLF) && (is_seq_cmd))
+ opcode |= FIFOLDST_VLF;
+ if ((flags & SGF) && (!is_seq_cmd))
+ opcode |= FIFOLDST_SGF;
+ if (flags & CLASS1)
+ opcode |= FIFOST_CLASS_CLASS1KEY;
+ if (flags & CLASS2)
+ opcode |= FIFOST_CLASS_CLASS2KEY;
+ if (flags & BOTH)
+ opcode |= FIFOST_CLASS_BOTH;
+
+ /* Verify if extended length is required */
+ if ((length >> 16) || (flags & EXT))
+ opcode |= FIFOLDST_EXT;
+ else
+ opcode |= (uint16_t) length;
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ /* write pointer field */
+ if ((!is_seq_cmd) && (dst))
+ __rta_out64(program, program->ps, dst);
+
+ /* write extended length field */
+ if (opcode & FIFOLDST_EXT)
+ __rta_out32(program, length);
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
+
+#endif /* __RTA_FIFO_LOAD_STORE_CMD_H__ */
diff --git a/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/header_cmd.h b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/header_cmd.h
new file mode 100644
index 00000000..c2a27a2d
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/header_cmd.h
@@ -0,0 +1,251 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __RTA_HEADER_CMD_H__
+#define __RTA_HEADER_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+/* Allowed job header flags for each SEC Era. */
+static const uint32_t job_header_flags[] = {
+ DNR | TD | MTD | SHR | REO,
+ DNR | TD | MTD | SHR | REO | RSMS,
+ DNR | TD | MTD | SHR | REO | RSMS,
+ DNR | TD | MTD | SHR | REO | RSMS,
+ DNR | TD | MTD | SHR | REO | RSMS | EXT,
+ DNR | TD | MTD | SHR | REO | RSMS | EXT,
+ DNR | TD | MTD | SHR | REO | RSMS | EXT,
+ DNR | TD | MTD | SHR | REO | EXT
+};
+
+/* Allowed shared header flags for each SEC Era. */
+static const uint32_t shr_header_flags[] = {
+ DNR | SC | PD,
+ DNR | SC | PD | CIF,
+ DNR | SC | PD | CIF,
+ DNR | SC | PD | CIF | RIF,
+ DNR | SC | PD | CIF | RIF,
+ DNR | SC | PD | CIF | RIF,
+ DNR | SC | PD | CIF | RIF,
+ DNR | SC | PD | CIF | RIF
+};
+
+static inline int
+rta_shr_header(struct program *program,
+ enum rta_share_type share,
+ unsigned int start_idx,
+ uint32_t flags)
+{
+ uint32_t opcode = CMD_SHARED_DESC_HDR;
+ unsigned int start_pc = program->current_pc;
+
+ if (flags & ~shr_header_flags[rta_sec_era]) {
+ pr_err("SHR_DESC: Flag(s) not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+
+ switch (share) {
+ case SHR_ALWAYS:
+ opcode |= HDR_SHARE_ALWAYS;
+ break;
+ case SHR_SERIAL:
+ opcode |= HDR_SHARE_SERIAL;
+ break;
+ case SHR_NEVER:
+ /*
+ * opcode |= HDR_SHARE_NEVER;
+ * HDR_SHARE_NEVER is 0
+ */
+ break;
+ case SHR_WAIT:
+ opcode |= HDR_SHARE_WAIT;
+ break;
+ default:
+ pr_err("SHR_DESC: SHARE VALUE is not supported. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ opcode |= HDR_ONE;
+ opcode |= (start_idx << HDR_START_IDX_SHIFT) & HDR_START_IDX_MASK;
+
+ if (flags & DNR)
+ opcode |= HDR_DNR;
+ if (flags & CIF)
+ opcode |= HDR_CLEAR_IFIFO;
+ if (flags & SC)
+ opcode |= HDR_SAVECTX;
+ if (flags & PD)
+ opcode |= HDR_PROP_DNR;
+ if (flags & RIF)
+ opcode |= HDR_RIF;
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ if (program->current_instruction == 1)
+ program->shrhdr = program->buffer;
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return -EINVAL;
+}
+
+static inline int
+rta_job_header(struct program *program,
+ enum rta_share_type share,
+ unsigned int start_idx,
+ uint64_t shr_desc, uint32_t flags,
+ uint32_t ext_flags)
+{
+ uint32_t opcode = CMD_DESC_HDR;
+ uint32_t hdr_ext = 0;
+ unsigned int start_pc = program->current_pc;
+
+ if (flags & ~job_header_flags[rta_sec_era]) {
+ pr_err("JOB_DESC: Flag(s) not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+
+ switch (share) {
+ case SHR_ALWAYS:
+ opcode |= HDR_SHARE_ALWAYS;
+ break;
+ case SHR_SERIAL:
+ opcode |= HDR_SHARE_SERIAL;
+ break;
+ case SHR_NEVER:
+ /*
+ * opcode |= HDR_SHARE_NEVER;
+ * HDR_SHARE_NEVER is 0
+ */
+ break;
+ case SHR_WAIT:
+ opcode |= HDR_SHARE_WAIT;
+ break;
+ case SHR_DEFER:
+ opcode |= HDR_SHARE_DEFER;
+ break;
+ default:
+ pr_err("JOB_DESC: SHARE VALUE is not supported. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ if ((flags & TD) && (flags & REO)) {
+ pr_err("JOB_DESC: REO flag not supported for trusted descriptors. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ if ((rta_sec_era < RTA_SEC_ERA_7) && (flags & MTD) && !(flags & TD)) {
+ pr_err("JOB_DESC: Trying to MTD a descriptor that is not a TD. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ if ((flags & EXT) && !(flags & SHR) && (start_idx < 2)) {
+ pr_err("JOB_DESC: Start index must be >= 2 in case of no SHR and EXT. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ opcode |= HDR_ONE;
+ opcode |= ((start_idx << HDR_START_IDX_SHIFT) & HDR_START_IDX_MASK);
+
+ if (flags & EXT) {
+ opcode |= HDR_EXT;
+
+ if (ext_flags & DSV) {
+ hdr_ext |= HDR_EXT_DSEL_VALID;
+ hdr_ext |= ext_flags & DSEL_MASK;
+ }
+
+ if (ext_flags & FTD) {
+ if (rta_sec_era <= RTA_SEC_ERA_5) {
+ pr_err("JOB_DESC: Fake trusted descriptor not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+
+ hdr_ext |= HDR_EXT_FTD;
+ }
+ }
+ if (flags & RSMS)
+ opcode |= HDR_RSLS;
+ if (flags & DNR)
+ opcode |= HDR_DNR;
+ if (flags & TD)
+ opcode |= HDR_TRUSTED;
+ if (flags & MTD)
+ opcode |= HDR_MAKE_TRUSTED;
+ if (flags & REO)
+ opcode |= HDR_REVERSE;
+ if (flags & SHR)
+ opcode |= HDR_SHARED;
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ if (program->current_instruction == 1) {
+ program->jobhdr = program->buffer;
+
+ if (opcode & HDR_SHARED)
+ __rta_out64(program, program->ps, shr_desc);
+ }
+
+ if (flags & EXT)
+ __rta_out32(program, hdr_ext);
+
+ /* Note: descriptor length is set in program_finalize routine */
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return -EINVAL;
+}
+
+#endif /* __RTA_HEADER_CMD_H__ */
diff --git a/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/jump_cmd.h b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/jump_cmd.h
new file mode 100644
index 00000000..2c85beec
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/jump_cmd.h
@@ -0,0 +1,207 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __RTA_JUMP_CMD_H__
+#define __RTA_JUMP_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+static const uint32_t jump_test_cond[][2] = {
+ { NIFP, JUMP_COND_NIFP },
+ { NIP, JUMP_COND_NIP },
+ { NOP, JUMP_COND_NOP },
+ { NCP, JUMP_COND_NCP },
+ { CALM, JUMP_COND_CALM },
+ { SELF, JUMP_COND_SELF },
+ { SHRD, JUMP_COND_SHRD },
+ { JQP, JUMP_COND_JQP },
+ { MATH_Z, JUMP_COND_MATH_Z },
+ { MATH_N, JUMP_COND_MATH_N },
+ { MATH_NV, JUMP_COND_MATH_NV },
+ { MATH_C, JUMP_COND_MATH_C },
+ { PK_0, JUMP_COND_PK_0 },
+ { PK_GCD_1, JUMP_COND_PK_GCD_1 },
+ { PK_PRIME, JUMP_COND_PK_PRIME },
+ { CLASS1, JUMP_CLASS_CLASS1 },
+ { CLASS2, JUMP_CLASS_CLASS2 },
+ { BOTH, JUMP_CLASS_BOTH }
+};
+
+static const uint32_t jump_test_math_cond[][2] = {
+ { MATH_Z, JUMP_COND_MATH_Z },
+ { MATH_N, JUMP_COND_MATH_N },
+ { MATH_NV, JUMP_COND_MATH_NV },
+ { MATH_C, JUMP_COND_MATH_C }
+};
+
+static const uint32_t jump_src_dst[][2] = {
+ { MATH0, JUMP_SRC_DST_MATH0 },
+ { MATH1, JUMP_SRC_DST_MATH1 },
+ { MATH2, JUMP_SRC_DST_MATH2 },
+ { MATH3, JUMP_SRC_DST_MATH3 },
+ { DPOVRD, JUMP_SRC_DST_DPOVRD },
+ { SEQINSZ, JUMP_SRC_DST_SEQINLEN },
+ { SEQOUTSZ, JUMP_SRC_DST_SEQOUTLEN },
+ { VSEQINSZ, JUMP_SRC_DST_VARSEQINLEN },
+ { VSEQOUTSZ, JUMP_SRC_DST_VARSEQOUTLEN }
+};
+
+static inline int
+rta_jump(struct program *program, uint64_t address,
+ enum rta_jump_type jump_type,
+ enum rta_jump_cond test_type,
+ uint32_t test_condition, uint32_t src_dst)
+{
+ uint32_t opcode = CMD_JUMP;
+ unsigned int start_pc = program->current_pc;
+ int ret = -EINVAL;
+
+ if (((jump_type == GOSUB) || (jump_type == RETURN)) &&
+ (rta_sec_era < RTA_SEC_ERA_4)) {
+ pr_err("JUMP: Jump type not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+
+ if (((jump_type == LOCAL_JUMP_INC) || (jump_type == LOCAL_JUMP_DEC)) &&
+ (rta_sec_era <= RTA_SEC_ERA_5)) {
+ pr_err("JUMP_INCDEC: Jump type not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+
+ switch (jump_type) {
+ case (LOCAL_JUMP):
+ /*
+ * opcode |= JUMP_TYPE_LOCAL;
+ * JUMP_TYPE_LOCAL is 0
+ */
+ break;
+ case (HALT):
+ opcode |= JUMP_TYPE_HALT;
+ break;
+ case (HALT_STATUS):
+ opcode |= JUMP_TYPE_HALT_USER;
+ break;
+ case (FAR_JUMP):
+ opcode |= JUMP_TYPE_NONLOCAL;
+ break;
+ case (GOSUB):
+ opcode |= JUMP_TYPE_GOSUB;
+ break;
+ case (RETURN):
+ opcode |= JUMP_TYPE_RETURN;
+ break;
+ case (LOCAL_JUMP_INC):
+ opcode |= JUMP_TYPE_LOCAL_INC;
+ break;
+ case (LOCAL_JUMP_DEC):
+ opcode |= JUMP_TYPE_LOCAL_DEC;
+ break;
+ default:
+ pr_err("JUMP: Invalid jump type. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ switch (test_type) {
+ case (ALL_TRUE):
+ /*
+ * opcode |= JUMP_TEST_ALL;
+ * JUMP_TEST_ALL is 0
+ */
+ break;
+ case (ALL_FALSE):
+ opcode |= JUMP_TEST_INVALL;
+ break;
+ case (ANY_TRUE):
+ opcode |= JUMP_TEST_ANY;
+ break;
+ case (ANY_FALSE):
+ opcode |= JUMP_TEST_INVANY;
+ break;
+ default:
+ pr_err("JUMP: test type not supported. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ /* write test condition field */
+ if ((jump_type != LOCAL_JUMP_INC) && (jump_type != LOCAL_JUMP_DEC)) {
+ __rta_map_flags(test_condition, jump_test_cond,
+ ARRAY_SIZE(jump_test_cond), &opcode);
+ } else {
+ uint32_t val = 0;
+
+ ret = __rta_map_opcode(src_dst, jump_src_dst,
+ ARRAY_SIZE(jump_src_dst), &val);
+ if (ret < 0) {
+ pr_err("JUMP_INCDEC: SRC_DST not supported. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ opcode |= val;
+
+ __rta_map_flags(test_condition, jump_test_math_cond,
+ ARRAY_SIZE(jump_test_math_cond), &opcode);
+ }
+
+ /* write local offset field for local jumps and user-defined halt */
+ if ((jump_type == LOCAL_JUMP) || (jump_type == LOCAL_JUMP_INC) ||
+ (jump_type == LOCAL_JUMP_DEC) || (jump_type == GOSUB) ||
+ (jump_type == HALT_STATUS))
+ opcode |= (uint32_t)(address & JUMP_OFFSET_MASK);
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ if (jump_type == FAR_JUMP)
+ __rta_out64(program, program->ps, address);
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
+
+#endif /* __RTA_JUMP_CMD_H__ */
diff --git a/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/key_cmd.h b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/key_cmd.h
new file mode 100644
index 00000000..9bef1155
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/key_cmd.h
@@ -0,0 +1,222 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __RTA_KEY_CMD_H__
+#define __RTA_KEY_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+/* Allowed encryption flags for each SEC Era */
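+/* (The table below is indexed directly by rta_sec_era; assuming the usual
+ * RTA convention that RTA_SEC_ERA_1 equals 0, entry 0 applies to SEC Era 1.)
+ */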
+static const uint32_t key_enc_flags[] = {
+ ENC,
+ ENC | NWB | EKT | TK,
+ ENC | NWB | EKT | TK,
+ ENC | NWB | EKT | TK,
+ ENC | NWB | EKT | TK,
+ ENC | NWB | EKT | TK,
+ ENC | NWB | EKT | TK | PTS,
+ ENC | NWB | EKT | TK | PTS
+};
+
+static inline int
+rta_key(struct program *program, uint32_t key_dst,
+ uint32_t encrypt_flags, uint64_t src, uint32_t length,
+ uint32_t flags)
+{
+ uint32_t opcode = 0;
+ bool is_seq_cmd = false;
+ unsigned int start_pc = program->current_pc;
+
+ if (encrypt_flags & ~key_enc_flags[rta_sec_era]) {
+ pr_err("KEY: Flag(s) not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+
+ /* write cmd type */
+ if (flags & SEQ) {
+ opcode = CMD_SEQ_KEY;
+ is_seq_cmd = true;
+ } else {
+ opcode = CMD_KEY;
+ }
+
+ /* check parameters */
+ if (is_seq_cmd) {
+ if ((flags & IMMED) || (flags & SGF)) {
+ pr_err("SEQKEY: Invalid flag. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ if ((rta_sec_era <= RTA_SEC_ERA_5) &&
+ ((flags & VLF) || (flags & AIDF))) {
+ pr_err("SEQKEY: Flag(s) not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+ } else {
+ if ((flags & AIDF) || (flags & VLF)) {
+ pr_err("KEY: Invalid flag. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ if ((flags & SGF) && (flags & IMMED)) {
+ pr_err("KEY: Invalid flag. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ }
+
+ if ((encrypt_flags & PTS) &&
+ ((encrypt_flags & ENC) || (encrypt_flags & NWB) ||
+ (key_dst == PKE))) {
+ pr_err("KEY: Invalid flag / destination. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+
+ if (key_dst == AFHA_SBOX) {
+ if (rta_sec_era == RTA_SEC_ERA_7) {
+ pr_err("KEY: AFHA S-box not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+
+ if (flags & IMMED) {
+ pr_err("KEY: Invalid flag. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ /*
+ * Sbox data loaded into the ARC-4 processor must be exactly
+ * 258 bytes long, or else a data sequence error is generated.
+ */
+ if (length != 258) {
+ pr_err("KEY: Invalid length. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ }
+
+ /* write key destination and class fields */
+ switch (key_dst) {
+ case (KEY1):
+ opcode |= KEY_DEST_CLASS1;
+ break;
+ case (KEY2):
+ opcode |= KEY_DEST_CLASS2;
+ break;
+ case (PKE):
+ opcode |= KEY_DEST_CLASS1 | KEY_DEST_PKHA_E;
+ break;
+ case (AFHA_SBOX):
+ opcode |= KEY_DEST_CLASS1 | KEY_DEST_AFHA_SBOX;
+ break;
+ case (MDHA_SPLIT_KEY):
+ opcode |= KEY_DEST_CLASS2 | KEY_DEST_MDHA_SPLIT;
+ break;
+ default:
+ pr_err("KEY: Invalid destination. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+
+ /* write key length */
+ length &= KEY_LENGTH_MASK;
+ opcode |= length;
+
+ /* write key command specific flags */
+ if (encrypt_flags & ENC) {
+		/* Encrypted (black) keys must be padded to a multiple of
+		 * 8 bytes (CCM) or 16 bytes (ECB), depending on the EKT bit.
+		 * AES-CCM encrypted keys (EKT = 1) carry a 6-byte nonce and
+		 * a 6-byte MAC after the padding.
+		 */
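+		/* For example: a 20-byte black key with EKT set is padded
+		 * to ALIGN(20, 8) = 24 bytes, then 12 bytes of nonce + MAC
+		 * are added, so 36 bytes are inlined or fetched below.
+		 */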
+ opcode |= KEY_ENC;
+ if (encrypt_flags & EKT) {
+ opcode |= KEY_EKT;
+ length = ALIGN(length, 8);
+ length += 12;
+ } else {
+ length = ALIGN(length, 16);
+ }
+ if (encrypt_flags & TK)
+ opcode |= KEY_TK;
+ }
+ if (encrypt_flags & NWB)
+ opcode |= KEY_NWB;
+ if (encrypt_flags & PTS)
+ opcode |= KEY_PTS;
+
+ /* write general command flags */
+ if (!is_seq_cmd) {
+ if (flags & IMMED)
+ opcode |= KEY_IMM;
+ if (flags & SGF)
+ opcode |= KEY_SGF;
+ } else {
+ if (flags & AIDF)
+ opcode |= KEY_AIDF;
+ if (flags & VLF)
+ opcode |= KEY_VLF;
+ }
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ if (flags & IMMED)
+ __rta_inline_data(program, src, flags & __COPY_MASK, length);
+ else
+ __rta_out64(program, program->ps, src);
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return -EINVAL;
+}
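+
+/*
+ * Illustrative usage (a sketch, not part of the original header): load a
+ * 16-byte plaintext key located at the hypothetical bus address key_addr
+ * into the class 1 key register:
+ *
+ *	rta_key(p, KEY1, 0, key_addr, 16, 0);
+ */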
+
+#endif /* __RTA_KEY_CMD_H__ */
diff --git a/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/load_cmd.h b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/load_cmd.h
new file mode 100644
index 00000000..1db55b33
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/load_cmd.h
@@ -0,0 +1,335 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __RTA_LOAD_CMD_H__
+#define __RTA_LOAD_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+/* Allowed length and offset masks for each SEC Era in case DST = DCTRL */
+static const uint32_t load_len_mask_allowed[] = {
+ 0x000000ee,
+ 0x000000fe,
+ 0x000000fe,
+ 0x000000fe,
+ 0x000000fe,
+ 0x000000fe,
+ 0x000000fe,
+ 0x000000fe
+};
+
+static const uint32_t load_off_mask_allowed[] = {
+ 0x0000000f,
+ 0x000000ff,
+ 0x000000ff,
+ 0x000000ff,
+ 0x000000ff,
+ 0x000000ff,
+ 0x000000ff,
+ 0x000000ff
+};
+
+#define IMM_MUST 0
+#define IMM_CAN 1
+#define IMM_NO 2
+#define IMM_DSNM	3 /* the source type does not matter */
+
+enum e_lenoff {
+ LENOF_03,
+ LENOF_4,
+ LENOF_48,
+ LENOF_448,
+ LENOF_18,
+ LENOF_32,
+ LENOF_24,
+ LENOF_16,
+ LENOF_8,
+ LENOF_128,
+ LENOF_256,
+	DSNM /* the length/offset values do not matter */
+};
+
+struct load_map {
+ uint32_t dst;
+ uint32_t dst_opcode;
+ enum e_lenoff len_off;
+	uint8_t imm_src;
+};
+
+static const struct load_map load_dst[] = {
+/*1*/ { KEY1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_KEYSZ_REG,
+ LENOF_4, IMM_MUST },
+ { KEY2SZ, LDST_CLASS_2_CCB | LDST_SRCDST_WORD_KEYSZ_REG,
+ LENOF_4, IMM_MUST },
+ { DATA1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_DATASZ_REG,
+ LENOF_448, IMM_MUST },
+ { DATA2SZ, LDST_CLASS_2_CCB | LDST_SRCDST_WORD_DATASZ_REG,
+ LENOF_448, IMM_MUST },
+ { ICV1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_ICVSZ_REG,
+ LENOF_4, IMM_MUST },
+ { ICV2SZ, LDST_CLASS_2_CCB | LDST_SRCDST_WORD_ICVSZ_REG,
+ LENOF_4, IMM_MUST },
+ { CCTRL, LDST_CLASS_IND_CCB | LDST_SRCDST_WORD_CHACTRL,
+ LENOF_4, IMM_MUST },
+ { DCTRL, LDST_CLASS_DECO | LDST_IMM | LDST_SRCDST_WORD_DECOCTRL,
+ DSNM, IMM_DSNM },
+ { ICTRL, LDST_CLASS_IND_CCB | LDST_SRCDST_WORD_IRQCTRL,
+ LENOF_4, IMM_MUST },
+ { DPOVRD, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_PCLOVRD,
+ LENOF_4, IMM_MUST },
+ { CLRW, LDST_CLASS_IND_CCB | LDST_SRCDST_WORD_CLRW,
+ LENOF_4, IMM_MUST },
+ { AAD1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_DECO_AAD_SZ,
+ LENOF_4, IMM_MUST },
+ { IV1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_CLASS1_IV_SZ,
+ LENOF_4, IMM_MUST },
+ { ALTDS1, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_ALTDS_CLASS1,
+ LENOF_448, IMM_MUST },
+ { PKASZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_PKHA_A_SZ,
+	  LENOF_4, IMM_MUST },
+ { PKBSZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_PKHA_B_SZ,
+ LENOF_4, IMM_MUST },
+ { PKNSZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_PKHA_N_SZ,
+ LENOF_4, IMM_MUST },
+ { PKESZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_PKHA_E_SZ,
+ LENOF_4, IMM_MUST },
+ { NFIFO, LDST_CLASS_IND_CCB | LDST_SRCDST_WORD_INFO_FIFO,
+ LENOF_48, IMM_MUST },
+ { IFIFO, LDST_SRCDST_BYTE_INFIFO, LENOF_18, IMM_MUST },
+ { OFIFO, LDST_SRCDST_BYTE_OUTFIFO, LENOF_18, IMM_MUST },
+ { MATH0, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_MATH0,
+ LENOF_32, IMM_CAN },
+ { MATH1, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_MATH1,
+ LENOF_24, IMM_CAN },
+ { MATH2, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_MATH2,
+ LENOF_16, IMM_CAN },
+ { MATH3, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_MATH3,
+ LENOF_8, IMM_CAN },
+ { CONTEXT1, LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT,
+ LENOF_128, IMM_CAN },
+ { CONTEXT2, LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_CONTEXT,
+ LENOF_128, IMM_CAN },
+ { KEY1, LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_KEY,
+ LENOF_32, IMM_CAN },
+ { KEY2, LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_KEY,
+ LENOF_32, IMM_CAN },
+ { DESCBUF, LDST_CLASS_DECO | LDST_SRCDST_WORD_DESCBUF,
+ LENOF_256, IMM_NO },
+ { DPID, LDST_CLASS_DECO | LDST_SRCDST_WORD_PID,
+ LENOF_448, IMM_MUST },
+/*32*/ { IDFNS, LDST_SRCDST_WORD_IFNSR, LENOF_18, IMM_MUST },
+ { ODFNS, LDST_SRCDST_WORD_OFNSR, LENOF_18, IMM_MUST },
+ { ALTSOURCE, LDST_SRCDST_BYTE_ALTSOURCE, LENOF_18, IMM_MUST },
+/*35*/ { NFIFO_SZL, LDST_SRCDST_WORD_INFO_FIFO_SZL, LENOF_48, IMM_MUST },
+ { NFIFO_SZM, LDST_SRCDST_WORD_INFO_FIFO_SZM, LENOF_03, IMM_MUST },
+ { NFIFO_L, LDST_SRCDST_WORD_INFO_FIFO_L, LENOF_48, IMM_MUST },
+ { NFIFO_M, LDST_SRCDST_WORD_INFO_FIFO_M, LENOF_03, IMM_MUST },
+ { SZL, LDST_SRCDST_WORD_SZL, LENOF_48, IMM_MUST },
+/*40*/ { SZM, LDST_SRCDST_WORD_SZM, LENOF_03, IMM_MUST }
+};
+
+/*
+ * Allowed LOAD destinations for each SEC Era.
+ * Values represent the number of entries from load_dst[] that are supported.
+ */
+static const unsigned int load_dst_sz[] = { 31, 34, 34, 40, 40, 40, 40, 40 };
+
+static inline int
+load_check_len_offset(int pos, uint32_t length, uint32_t offset)
+{
+ if ((load_dst[pos].dst == DCTRL) &&
+ ((length & ~load_len_mask_allowed[rta_sec_era]) ||
+ (offset & ~load_off_mask_allowed[rta_sec_era])))
+ goto err;
+
+ switch (load_dst[pos].len_off) {
+ case (LENOF_03):
+ if ((length > 3) || (offset))
+ goto err;
+ break;
+ case (LENOF_4):
+ if ((length != 4) || (offset != 0))
+ goto err;
+ break;
+ case (LENOF_48):
+ if (!(((length == 4) && (offset == 0)) ||
+ ((length == 8) && (offset == 0))))
+ goto err;
+ break;
+ case (LENOF_448):
+ if (!(((length == 4) && (offset == 0)) ||
+ ((length == 4) && (offset == 4)) ||
+ ((length == 8) && (offset == 0))))
+ goto err;
+ break;
+ case (LENOF_18):
+ if ((length < 1) || (length > 8) || (offset != 0))
+ goto err;
+ break;
+ case (LENOF_32):
+ if ((length > 32) || (offset > 32) || ((offset + length) > 32))
+ goto err;
+ break;
+ case (LENOF_24):
+ if ((length > 24) || (offset > 24) || ((offset + length) > 24))
+ goto err;
+ break;
+ case (LENOF_16):
+ if ((length > 16) || (offset > 16) || ((offset + length) > 16))
+ goto err;
+ break;
+ case (LENOF_8):
+ if ((length > 8) || (offset > 8) || ((offset + length) > 8))
+ goto err;
+ break;
+ case (LENOF_128):
+ if ((length > 128) || (offset > 128) ||
+ ((offset + length) > 128))
+ goto err;
+ break;
+ case (LENOF_256):
+ if ((length < 1) || (length > 256) || ((length + offset) > 256))
+ goto err;
+ break;
+ case (DSNM):
+ break;
+ default:
+ goto err;
+ }
+
+ return 0;
+err:
+ return -EINVAL;
+}
+
+static inline int
+rta_load(struct program *program, uint64_t src, uint64_t dst,
+ uint32_t offset, uint32_t length, uint32_t flags)
+{
+ uint32_t opcode = 0;
+ int pos = -1, ret = -EINVAL;
+ unsigned int start_pc = program->current_pc, i;
+
+ if (flags & SEQ)
+ opcode = CMD_SEQ_LOAD;
+ else
+ opcode = CMD_LOAD;
+
+ if ((length & 0xffffff00) || (offset & 0xffffff00)) {
+ pr_err("LOAD: Bad length/offset passed. Should be 8 bits\n");
+ goto err;
+ }
+
+ if (flags & SGF)
+ opcode |= LDST_SGF;
+ if (flags & VLF)
+ opcode |= LDST_VLF;
+
+ /* check load destination, length and offset and source type */
+ for (i = 0; i < load_dst_sz[rta_sec_era]; i++)
+ if (dst == load_dst[i].dst) {
+ pos = (int)i;
+ break;
+ }
+ if (-1 == pos) {
+ pr_err("LOAD: Invalid dst. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ if (flags & IMMED) {
+ if (load_dst[pos].imm_src == IMM_NO) {
+ pr_err("LOAD: Invalid source type. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+ opcode |= LDST_IMM;
+ } else if (load_dst[pos].imm_src == IMM_MUST) {
+ pr_err("LOAD IMM: Invalid source type. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ ret = load_check_len_offset(pos, length, offset);
+ if (ret < 0) {
+ pr_err("LOAD: Invalid length/offset. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ opcode |= load_dst[pos].dst_opcode;
+
+ /* DESC BUFFER: length / offset values are specified in 4-byte words */
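+	/* e.g. a 16-byte length at byte offset 8 is encoded below as
+	 * 4 words and 2 words respectively
+	 */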
+ if (dst == DESCBUF) {
+ opcode |= (length >> 2);
+ opcode |= ((offset >> 2) << LDST_OFFSET_SHIFT);
+ } else {
+ opcode |= length;
+ opcode |= (offset << LDST_OFFSET_SHIFT);
+ }
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ /* DECO CONTROL: skip writing pointer of imm data */
+ if (dst == DCTRL)
+ return (int)start_pc;
+
+	/*
+	 * For the data copy there are 3 ways to specify how the data is
+	 * copied:
+	 * - IMMED & !COPY: copy the data directly from src (max. 8 bytes)
+	 * - IMMED & COPY: copy immediate data from the location specified
+	 *   by the user
+	 * - !IMMED and not a SEQ command: copy the address
+	 */
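+	/* Illustrative example (hypothetical values):
+	 * rta_load(p, 0x0011223344556677, MATH0, 0, 8, IMMED) falls in the
+	 * first case and inlines the 8 immediate bytes right after the
+	 * command word.
+	 */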
+ if (flags & IMMED)
+ __rta_inline_data(program, src, flags & __COPY_MASK, length);
+ else if (!(flags & SEQ))
+ __rta_out64(program, program->ps, src);
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
+
+#endif /* __RTA_LOAD_CMD_H__*/
diff --git a/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/math_cmd.h b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/math_cmd.h
new file mode 100644
index 00000000..a9b9091d
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/math_cmd.h
@@ -0,0 +1,402 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __RTA_MATH_CMD_H__
+#define __RTA_MATH_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+static const uint32_t math_op1[][2] = {
+/*1*/ { MATH0, MATH_SRC0_REG0 },
+ { MATH1, MATH_SRC0_REG1 },
+ { MATH2, MATH_SRC0_REG2 },
+ { MATH3, MATH_SRC0_REG3 },
+ { SEQINSZ, MATH_SRC0_SEQINLEN },
+ { SEQOUTSZ, MATH_SRC0_SEQOUTLEN },
+ { VSEQINSZ, MATH_SRC0_VARSEQINLEN },
+ { VSEQOUTSZ, MATH_SRC0_VARSEQOUTLEN },
+ { ZERO, MATH_SRC0_ZERO },
+/*10*/ { NONE, 0 }, /* dummy value */
+ { DPOVRD, MATH_SRC0_DPOVRD },
+ { ONE, MATH_SRC0_ONE }
+};
+
+/*
+ * Allowed MATH op1 sources for each SEC Era.
+ * Values represent the number of entries from math_op1[] that are supported.
+ */
+static const unsigned int math_op1_sz[] = {10, 10, 12, 12, 12, 12, 12, 12};
+
+static const uint32_t math_op2[][2] = {
+/*1*/ { MATH0, MATH_SRC1_REG0 },
+ { MATH1, MATH_SRC1_REG1 },
+ { MATH2, MATH_SRC1_REG2 },
+ { MATH3, MATH_SRC1_REG3 },
+ { ABD, MATH_SRC1_INFIFO },
+ { OFIFO, MATH_SRC1_OUTFIFO },
+ { ONE, MATH_SRC1_ONE },
+/*8*/ { NONE, 0 }, /* dummy value */
+ { JOBSRC, MATH_SRC1_JOBSOURCE },
+ { DPOVRD, MATH_SRC1_DPOVRD },
+ { VSEQINSZ, MATH_SRC1_VARSEQINLEN },
+ { VSEQOUTSZ, MATH_SRC1_VARSEQOUTLEN },
+/*13*/ { ZERO, MATH_SRC1_ZERO }
+};
+
+/*
+ * Allowed MATH op2 sources for each SEC Era.
+ * Values represent the number of entries from math_op2[] that are supported.
+ */
+static const unsigned int math_op2_sz[] = {8, 9, 13, 13, 13, 13, 13, 13};
+
+static const uint32_t math_result[][2] = {
+/*1*/ { MATH0, MATH_DEST_REG0 },
+ { MATH1, MATH_DEST_REG1 },
+ { MATH2, MATH_DEST_REG2 },
+ { MATH3, MATH_DEST_REG3 },
+ { SEQINSZ, MATH_DEST_SEQINLEN },
+ { SEQOUTSZ, MATH_DEST_SEQOUTLEN },
+ { VSEQINSZ, MATH_DEST_VARSEQINLEN },
+ { VSEQOUTSZ, MATH_DEST_VARSEQOUTLEN },
+/*9*/ { NONE, MATH_DEST_NONE },
+ { DPOVRD, MATH_DEST_DPOVRD }
+};
+
+/*
+ * Allowed MATH result destinations for each SEC Era.
+ * Values represent the number of entries from math_result[] that are
+ * supported.
+ */
+static const unsigned int math_result_sz[] = {9, 9, 10, 10, 10, 10, 10, 10};
+
+static inline int
+rta_math(struct program *program, uint64_t operand1,
+ uint32_t op, uint64_t operand2, uint32_t result,
+ int length, uint32_t options)
+{
+ uint32_t opcode = CMD_MATH;
+ uint32_t val = 0;
+ int ret = -EINVAL;
+ unsigned int start_pc = program->current_pc;
+
+ if (((op == MATH_FUN_BSWAP) && (rta_sec_era < RTA_SEC_ERA_4)) ||
+ ((op == MATH_FUN_ZBYT) && (rta_sec_era < RTA_SEC_ERA_2))) {
+ pr_err("MATH: operation not supported by SEC Era %d. SEC PC: %d; Instr: %d\n",
+ USER_SEC_ERA(rta_sec_era), program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ if (options & SWP) {
+ if (rta_sec_era < RTA_SEC_ERA_7) {
+ pr_err("MATH: operation not supported by SEC Era %d. SEC PC: %d; Instr: %d\n",
+ USER_SEC_ERA(rta_sec_era), program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ if ((options & IFB) ||
+ (!(options & IMMED) && !(options & IMMED2)) ||
+ ((options & IMMED) && (options & IMMED2))) {
+ pr_err("MATH: SWP - invalid configuration. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ }
+
+	/*
+	 * The SHLD operation is different from the others: it is the only
+	 * one allowed to take _NONE as its first operand or _SEQINSZ as its
+	 * second operand.
+	 */
+ if ((op != MATH_FUN_SHLD) && ((operand1 == NONE) ||
+ (operand2 == SEQINSZ))) {
+ pr_err("MATH: Invalid operand. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+
+	/*
+	 * First check whether this is a unary operation; in that case the
+	 * second operand must be _NONE.
+	 */
+ if (((op == MATH_FUN_ZBYT) || (op == MATH_FUN_BSWAP)) &&
+ (operand2 != NONE)) {
+ pr_err("MATH: Invalid operand2. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+
+ /* Write first operand field */
+ if (options & IMMED) {
+ opcode |= MATH_SRC0_IMM;
+ } else {
+ ret = __rta_map_opcode((uint32_t)operand1, math_op1,
+ math_op1_sz[rta_sec_era], &val);
+ if (ret < 0) {
+ pr_err("MATH: operand1 not supported. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ opcode |= val;
+ }
+
+ /* Write second operand field */
+ if (options & IMMED2) {
+ opcode |= MATH_SRC1_IMM;
+ } else {
+ ret = __rta_map_opcode((uint32_t)operand2, math_op2,
+ math_op2_sz[rta_sec_era], &val);
+ if (ret < 0) {
+ pr_err("MATH: operand2 not supported. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ opcode |= val;
+ }
+
+ /* Write result field */
+ ret = __rta_map_opcode(result, math_result, math_result_sz[rta_sec_era],
+ &val);
+ if (ret < 0) {
+ pr_err("MATH: result not supported. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+ opcode |= val;
+
+	/*
+	 * As we encode operations with their "real" values, we do not have
+	 * to translate them, but we do need to validate the value.
+	 */
+ switch (op) {
+ /*Binary operators */
+ case (MATH_FUN_ADD):
+ case (MATH_FUN_ADDC):
+ case (MATH_FUN_SUB):
+ case (MATH_FUN_SUBB):
+ case (MATH_FUN_OR):
+ case (MATH_FUN_AND):
+ case (MATH_FUN_XOR):
+ case (MATH_FUN_LSHIFT):
+ case (MATH_FUN_RSHIFT):
+ case (MATH_FUN_SHLD):
+ /* Unary operators */
+ case (MATH_FUN_ZBYT):
+ case (MATH_FUN_BSWAP):
+ opcode |= op;
+ break;
+ default:
+ pr_err("MATH: operator is not supported. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ opcode |= (options & ~(IMMED | IMMED2));
+
+ /* Verify length */
+ switch (length) {
+ case (1):
+ opcode |= MATH_LEN_1BYTE;
+ break;
+ case (2):
+ opcode |= MATH_LEN_2BYTE;
+ break;
+ case (4):
+ opcode |= MATH_LEN_4BYTE;
+ break;
+ case (8):
+ opcode |= MATH_LEN_8BYTE;
+ break;
+ default:
+ pr_err("MATH: length is not supported. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ /* Write immediate value */
+ if ((options & IMMED) && !(options & IMMED2)) {
+ __rta_out64(program, (length > 4) && !(options & IFB),
+ operand1);
+ } else if ((options & IMMED2) && !(options & IMMED)) {
+ __rta_out64(program, (length > 4) && !(options & IFB),
+ operand2);
+ } else if ((options & IMMED) && (options & IMMED2)) {
+ __rta_out32(program, lower_32_bits(operand1));
+ __rta_out32(program, lower_32_bits(operand2));
+ }
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
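+
+/*
+ * Illustrative usage (a sketch): perform a 4-byte add of MATH0 and MATH1
+ * and store the result in MATH2:
+ *
+ *	rta_math(p, MATH0, MATH_FUN_ADD, MATH1, MATH2, 4, 0);
+ */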
+
+static inline int
+rta_mathi(struct program *program, uint64_t operand,
+ uint32_t op, uint8_t imm, uint32_t result,
+ int length, uint32_t options)
+{
+ uint32_t opcode = CMD_MATHI;
+ uint32_t val = 0;
+ int ret = -EINVAL;
+ unsigned int start_pc = program->current_pc;
+
+ if (rta_sec_era < RTA_SEC_ERA_6) {
+ pr_err("MATHI: Command not supported by SEC Era %d. SEC PC: %d; Instr: %d\n",
+ USER_SEC_ERA(rta_sec_era), program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ if (((op == MATH_FUN_FBYT) && (options & SSEL))) {
+ pr_err("MATHI: Illegal combination - FBYT and SSEL. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+
+ if ((options & SWP) && (rta_sec_era < RTA_SEC_ERA_7)) {
+ pr_err("MATHI: SWP not supported by SEC Era %d. SEC PC: %d; Instr: %d\n",
+ USER_SEC_ERA(rta_sec_era), program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ /* Write first operand field */
+ if (!(options & SSEL))
+ ret = __rta_map_opcode((uint32_t)operand, math_op1,
+ math_op1_sz[rta_sec_era], &val);
+ else
+ ret = __rta_map_opcode((uint32_t)operand, math_op2,
+ math_op2_sz[rta_sec_era], &val);
+ if (ret < 0) {
+ pr_err("MATHI: operand not supported. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+
+ if (!(options & SSEL))
+ opcode |= val;
+ else
+ opcode |= (val << (MATHI_SRC1_SHIFT - MATH_SRC1_SHIFT));
+
+ /* Write second operand field */
+ opcode |= (imm << MATHI_IMM_SHIFT);
+
+ /* Write result field */
+ ret = __rta_map_opcode(result, math_result, math_result_sz[rta_sec_era],
+ &val);
+ if (ret < 0) {
+ pr_err("MATHI: result not supported. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+ opcode |= (val << (MATHI_DEST_SHIFT - MATH_DEST_SHIFT));
+
+ /*
+ * as we encode operations with their "real" values, we do not have to
+ * translate but we do need to validate the value
+ */
+ switch (op) {
+ case (MATH_FUN_ADD):
+ case (MATH_FUN_ADDC):
+ case (MATH_FUN_SUB):
+ case (MATH_FUN_SUBB):
+ case (MATH_FUN_OR):
+ case (MATH_FUN_AND):
+ case (MATH_FUN_XOR):
+ case (MATH_FUN_LSHIFT):
+ case (MATH_FUN_RSHIFT):
+ case (MATH_FUN_FBYT):
+ opcode |= op;
+ break;
+ default:
+ pr_err("MATHI: operator not supported. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ opcode |= options;
+
+ /* Verify length */
+ switch (length) {
+ case (1):
+ opcode |= MATH_LEN_1BYTE;
+ break;
+ case (2):
+ opcode |= MATH_LEN_2BYTE;
+ break;
+ case (4):
+ opcode |= MATH_LEN_4BYTE;
+ break;
+ case (8):
+ opcode |= MATH_LEN_8BYTE;
+ break;
+ default:
+ pr_err("MATHI: length %d not supported. SEC PC: %d; Instr: %d\n",
+ length, program->current_pc,
+ program->current_instruction);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
+
+#endif /* __RTA_MATH_CMD_H__ */
diff --git a/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/move_cmd.h b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/move_cmd.h
new file mode 100644
index 00000000..10d2e193
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/move_cmd.h
@@ -0,0 +1,445 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __RTA_MOVE_CMD_H__
+#define __RTA_MOVE_CMD_H__
+
+#define MOVE_SET_AUX_SRC 0x01
+#define MOVE_SET_AUX_DST 0x02
+#define MOVE_SET_AUX_LS 0x03
+#define MOVE_SET_LEN_16b 0x04
+
+#define MOVE_SET_AUX_MATH 0x10
+#define MOVE_SET_AUX_MATH_SRC (MOVE_SET_AUX_SRC | MOVE_SET_AUX_MATH)
+#define MOVE_SET_AUX_MATH_DST (MOVE_SET_AUX_DST | MOVE_SET_AUX_MATH)
+
+#define MASK_16b 0xFF
+
+/* MOVE command type */
+#define __MOVE 1
+#define __MOVEB 2
+#define __MOVEDW 3
+
+extern enum rta_sec_era rta_sec_era;
+
+static const uint32_t move_src_table[][2] = {
+/*1*/ { CONTEXT1, MOVE_SRC_CLASS1CTX },
+ { CONTEXT2, MOVE_SRC_CLASS2CTX },
+ { OFIFO, MOVE_SRC_OUTFIFO },
+ { DESCBUF, MOVE_SRC_DESCBUF },
+ { MATH0, MOVE_SRC_MATH0 },
+ { MATH1, MOVE_SRC_MATH1 },
+ { MATH2, MOVE_SRC_MATH2 },
+ { MATH3, MOVE_SRC_MATH3 },
+/*9*/ { IFIFOABD, MOVE_SRC_INFIFO },
+ { IFIFOAB1, MOVE_SRC_INFIFO_CL | MOVE_AUX_LS },
+ { IFIFOAB2, MOVE_SRC_INFIFO_CL },
+/*12*/ { ABD, MOVE_SRC_INFIFO_NO_NFIFO },
+ { AB1, MOVE_SRC_INFIFO_NO_NFIFO | MOVE_AUX_LS },
+ { AB2, MOVE_SRC_INFIFO_NO_NFIFO | MOVE_AUX_MS }
+};
+
+/* Allowed MOVE / MOVE_LEN sources for each SEC Era.
+ * Values represent the number of entries from move_src_table[] that are
+ * supported.
+ */
+static const unsigned int move_src_table_sz[] = {9, 11, 14, 14, 14, 14, 14, 14};
+
+static const uint32_t move_dst_table[][2] = {
+/*1*/ { CONTEXT1, MOVE_DEST_CLASS1CTX },
+ { CONTEXT2, MOVE_DEST_CLASS2CTX },
+ { OFIFO, MOVE_DEST_OUTFIFO },
+ { DESCBUF, MOVE_DEST_DESCBUF },
+ { MATH0, MOVE_DEST_MATH0 },
+ { MATH1, MOVE_DEST_MATH1 },
+ { MATH2, MOVE_DEST_MATH2 },
+ { MATH3, MOVE_DEST_MATH3 },
+ { IFIFOAB1, MOVE_DEST_CLASS1INFIFO },
+ { IFIFOAB2, MOVE_DEST_CLASS2INFIFO },
+ { PKA, MOVE_DEST_PK_A },
+ { KEY1, MOVE_DEST_CLASS1KEY },
+ { KEY2, MOVE_DEST_CLASS2KEY },
+/*14*/ { IFIFO, MOVE_DEST_INFIFO },
+/*15*/ { ALTSOURCE, MOVE_DEST_ALTSOURCE}
+};
+
+/* Allowed MOVE / MOVE_LEN destinations for each SEC Era.
+ * Values represent the number of entries from move_dst_table[] that are
+ * supported.
+ */
+static const
+unsigned int move_dst_table_sz[] = {13, 14, 14, 15, 15, 15, 15, 15};
+
+static inline int
+set_move_offset(struct program *program __maybe_unused,
+ uint64_t src, uint16_t src_offset,
+ uint64_t dst, uint16_t dst_offset,
+ uint16_t *offset, uint16_t *opt);
+
+static inline int
+math_offset(uint16_t offset);
+
+static inline int
+rta_move(struct program *program, int cmd_type, uint64_t src,
+ uint16_t src_offset, uint64_t dst,
+ uint16_t dst_offset, uint32_t length, uint32_t flags)
+{
+ uint32_t opcode = 0;
+ uint16_t offset = 0, opt = 0;
+ uint32_t val = 0;
+ int ret = -EINVAL;
+ bool is_move_len_cmd = false;
+ unsigned int start_pc = program->current_pc;
+
+ if ((rta_sec_era < RTA_SEC_ERA_7) && (cmd_type != __MOVE)) {
+ pr_err("MOVE: MOVEB / MOVEDW not supported by SEC Era %d. SEC PC: %d; Instr: %d\n",
+ USER_SEC_ERA(rta_sec_era), program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ /* write command type */
+ if (cmd_type == __MOVEB) {
+ opcode = CMD_MOVEB;
+ } else if (cmd_type == __MOVEDW) {
+ opcode = CMD_MOVEDW;
+ } else if (!(flags & IMMED)) {
+ if (rta_sec_era < RTA_SEC_ERA_3) {
+ pr_err("MOVE: MOVE_LEN not supported by SEC Era %d. SEC PC: %d; Instr: %d\n",
+ USER_SEC_ERA(rta_sec_era), program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ if ((length != MATH0) && (length != MATH1) &&
+ (length != MATH2) && (length != MATH3)) {
+ pr_err("MOVE: MOVE_LEN length must be MATH[0-3]. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ opcode = CMD_MOVE_LEN;
+ is_move_len_cmd = true;
+ } else {
+ opcode = CMD_MOVE;
+ }
+
+	/* write the offset first, so that invalid combinations or incorrect
+	 * offset values are caught sooner; also decide which offset (src or
+	 * dst) belongs in this field
+	 */
+ ret = set_move_offset(program, src, src_offset, dst, dst_offset,
+ &offset, &opt);
+ if (ret < 0)
+ goto err;
+
+ opcode |= (offset << MOVE_OFFSET_SHIFT) & MOVE_OFFSET_MASK;
+
+ /* set AUX field if required */
+ if (opt == MOVE_SET_AUX_SRC) {
+ opcode |= ((src_offset / 16) << MOVE_AUX_SHIFT) & MOVE_AUX_MASK;
+ } else if (opt == MOVE_SET_AUX_DST) {
+ opcode |= ((dst_offset / 16) << MOVE_AUX_SHIFT) & MOVE_AUX_MASK;
+ } else if (opt == MOVE_SET_AUX_LS) {
+ opcode |= MOVE_AUX_LS;
+ } else if (opt & MOVE_SET_AUX_MATH) {
+ if (opt & MOVE_SET_AUX_SRC)
+ offset = src_offset;
+ else
+ offset = dst_offset;
+
+ if (rta_sec_era < RTA_SEC_ERA_6) {
+ if (offset)
+ pr_debug("MOVE: Offset not supported by SEC Era %d. SEC PC: %d; Instr: %d\n",
+ USER_SEC_ERA(rta_sec_era),
+ program->current_pc,
+ program->current_instruction);
+ /* nothing to do for offset = 0 */
+ } else {
+ ret = math_offset(offset);
+ if (ret < 0) {
+ pr_err("MOVE: Invalid offset in MATH register. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ opcode |= (uint32_t)ret;
+ }
+ }
+
+ /* write source field */
+ ret = __rta_map_opcode((uint32_t)src, move_src_table,
+ move_src_table_sz[rta_sec_era], &val);
+ if (ret < 0) {
+ pr_err("MOVE: Invalid SRC. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+ opcode |= val;
+
+ /* write destination field */
+ ret = __rta_map_opcode((uint32_t)dst, move_dst_table,
+ move_dst_table_sz[rta_sec_era], &val);
+ if (ret < 0) {
+ pr_err("MOVE: Invalid DST. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+ opcode |= val;
+
+ /* write flags */
+ if (flags & (FLUSH1 | FLUSH2))
+ opcode |= MOVE_AUX_MS;
+ if (flags & (LAST2 | LAST1))
+ opcode |= MOVE_AUX_LS;
+ if (flags & WAITCOMP)
+ opcode |= MOVE_WAITCOMP;
+
+ if (!is_move_len_cmd) {
+ /* write length */
+ if (opt == MOVE_SET_LEN_16b)
+ opcode |= (length & (MOVE_OFFSET_MASK | MOVE_LEN_MASK));
+ else
+ opcode |= (length & MOVE_LEN_MASK);
+ } else {
+ /* write mrsel */
+ switch (length) {
+ case (MATH0):
+ /*
+ * opcode |= MOVELEN_MRSEL_MATH0;
+ * MOVELEN_MRSEL_MATH0 is 0
+ */
+ break;
+ case (MATH1):
+ opcode |= MOVELEN_MRSEL_MATH1;
+ break;
+ case (MATH2):
+ opcode |= MOVELEN_MRSEL_MATH2;
+ break;
+ case (MATH3):
+ opcode |= MOVELEN_MRSEL_MATH3;
+ break;
+ }
+
+ /* write size */
+ if (rta_sec_era >= RTA_SEC_ERA_7) {
+ if (flags & SIZE_WORD)
+ opcode |= MOVELEN_SIZE_WORD;
+ else if (flags & SIZE_BYTE)
+ opcode |= MOVELEN_SIZE_BYTE;
+ else if (flags & SIZE_DWORD)
+ opcode |= MOVELEN_SIZE_DWORD;
+ }
+ }
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
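+
+/*
+ * Illustrative usage (a sketch): move 8 bytes from MATH0 into the output
+ * FIFO with a plain MOVE command (immediate length):
+ *
+ *	rta_move(p, __MOVE, MATH0, 0, OFIFO, 0, 8, IMMED);
+ */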
+
+static inline int
+set_move_offset(struct program *program __maybe_unused,
+ uint64_t src, uint16_t src_offset,
+ uint64_t dst, uint16_t dst_offset,
+ uint16_t *offset, uint16_t *opt)
+{
+ switch (src) {
+ case (CONTEXT1):
+ case (CONTEXT2):
+ if (dst == DESCBUF) {
+ *opt = MOVE_SET_AUX_SRC;
+ *offset = dst_offset;
+ } else if ((dst == KEY1) || (dst == KEY2)) {
+ if ((src_offset) && (dst_offset)) {
+ pr_err("MOVE: Bad offset. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ if (dst_offset) {
+ *opt = MOVE_SET_AUX_LS;
+ *offset = dst_offset;
+ } else {
+ *offset = src_offset;
+ }
+ } else {
+ if ((dst == MATH0) || (dst == MATH1) ||
+ (dst == MATH2) || (dst == MATH3)) {
+ *opt = MOVE_SET_AUX_MATH_DST;
+ } else if (((dst == OFIFO) || (dst == ALTSOURCE)) &&
+ (src_offset % 4)) {
+ pr_err("MOVE: Bad offset alignment. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ *offset = src_offset;
+ }
+ break;
+
+ case (OFIFO):
+ if (dst == OFIFO) {
+ pr_err("MOVE: Invalid DST. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ if (((dst == IFIFOAB1) || (dst == IFIFOAB2) ||
+ (dst == IFIFO) || (dst == PKA)) &&
+ (src_offset || dst_offset)) {
+ pr_err("MOVE: Offset should be zero. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ *offset = dst_offset;
+ break;
+
+ case (DESCBUF):
+ if ((dst == CONTEXT1) || (dst == CONTEXT2)) {
+ *opt = MOVE_SET_AUX_DST;
+ } else if ((dst == MATH0) || (dst == MATH1) ||
+ (dst == MATH2) || (dst == MATH3)) {
+ *opt = MOVE_SET_AUX_MATH_DST;
+ } else if (dst == DESCBUF) {
+ pr_err("MOVE: Invalid DST. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ } else if (((dst == OFIFO) || (dst == ALTSOURCE)) &&
+ (src_offset % 4)) {
+ pr_err("MOVE: Invalid offset alignment. SEC PC: %d; Instr %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+
+ *offset = src_offset;
+ break;
+
+ case (MATH0):
+ case (MATH1):
+ case (MATH2):
+ case (MATH3):
+ if ((dst == OFIFO) || (dst == ALTSOURCE)) {
+ if (src_offset % 4) {
+ pr_err("MOVE: Bad offset alignment. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ *offset = src_offset;
+ } else if ((dst == IFIFOAB1) || (dst == IFIFOAB2) ||
+ (dst == IFIFO) || (dst == PKA)) {
+ *offset = src_offset;
+ } else {
+ *offset = dst_offset;
+
+			/*
+			 * At this point dst can only be CONTEXT[1-2],
+			 * DESCBUF, MATH[0-3] or KEY[1-2]; use the MATH
+			 * source AUX option for everything except KEY[1-2].
+			 */
+ if ((dst != KEY1) && (dst != KEY2))
+ *opt = MOVE_SET_AUX_MATH_SRC;
+ }
+ break;
+
+ case (IFIFOABD):
+ case (IFIFOAB1):
+ case (IFIFOAB2):
+ case (ABD):
+ case (AB1):
+ case (AB2):
+ if ((dst == IFIFOAB1) || (dst == IFIFOAB2) ||
+ (dst == IFIFO) || (dst == PKA) || (dst == ALTSOURCE)) {
+ pr_err("MOVE: Bad DST. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ } else {
+ if (dst == OFIFO) {
+ *opt = MOVE_SET_LEN_16b;
+ } else {
+ if (dst_offset % 4) {
+ pr_err("MOVE: Bad offset alignment. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ *offset = dst_offset;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+ err:
+ return -EINVAL;
+}
+
+static inline int
+math_offset(uint16_t offset)
+{
+ switch (offset) {
+ case 0:
+ return 0;
+ case 4:
+ return MOVE_AUX_LS;
+ case 6:
+ return MOVE_AUX_MS;
+ case 7:
+ return MOVE_AUX_LS | MOVE_AUX_MS;
+ }
+
+ return -EINVAL;
+}
+
+#endif /* __RTA_MOVE_CMD_H__ */
diff --git a/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/nfifo_cmd.h b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/nfifo_cmd.h
new file mode 100644
index 00000000..30be0825
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/nfifo_cmd.h
@@ -0,0 +1,196 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __RTA_NFIFO_CMD_H__
+#define __RTA_NFIFO_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+static const uint32_t nfifo_src[][2] = {
+/*1*/ { IFIFO, NFIFOENTRY_STYPE_DFIFO },
+ { OFIFO, NFIFOENTRY_STYPE_OFIFO },
+ { PAD, NFIFOENTRY_STYPE_PAD },
+/*4*/ { MSGOUTSNOOP, NFIFOENTRY_STYPE_SNOOP | NFIFOENTRY_DEST_BOTH },
+/*5*/ { ALTSOURCE, NFIFOENTRY_STYPE_ALTSOURCE },
+ { OFIFO_SYNC, NFIFOENTRY_STYPE_OFIFO_SYNC },
+/*7*/ { MSGOUTSNOOP_ALT, NFIFOENTRY_STYPE_SNOOP_ALT | NFIFOENTRY_DEST_BOTH }
+};
+
+/*
+ * Allowed NFIFO LOAD sources for each SEC Era.
+ * Values represent the number of entries from nfifo_src[] that are supported.
+ */
+static const unsigned int nfifo_src_sz[] = {4, 5, 5, 5, 5, 5, 5, 7};
+
+static const uint32_t nfifo_data[][2] = {
+ { MSG, NFIFOENTRY_DTYPE_MSG },
+ { MSG1, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_MSG },
+ { MSG2, NFIFOENTRY_DEST_CLASS2 | NFIFOENTRY_DTYPE_MSG },
+ { IV1, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_IV },
+ { IV2, NFIFOENTRY_DEST_CLASS2 | NFIFOENTRY_DTYPE_IV },
+ { ICV1, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_ICV },
+ { ICV2, NFIFOENTRY_DEST_CLASS2 | NFIFOENTRY_DTYPE_ICV },
+ { SAD1, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_SAD },
+ { AAD1, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_AAD },
+ { AAD2, NFIFOENTRY_DEST_CLASS2 | NFIFOENTRY_DTYPE_AAD },
+ { AFHA_SBOX, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_SBOX },
+ { SKIP, NFIFOENTRY_DTYPE_SKIP },
+ { PKE, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_E },
+ { PKN, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_N },
+ { PKA, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_A },
+ { PKA0, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_A0 },
+ { PKA1, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_A1 },
+ { PKA2, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_A2 },
+ { PKA3, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_A3 },
+ { PKB, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_B },
+ { PKB0, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_B0 },
+ { PKB1, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_B1 },
+ { PKB2, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_B2 },
+ { PKB3, NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_DTYPE_PK_B3 },
+ { AB1, NFIFOENTRY_DEST_CLASS1 },
+ { AB2, NFIFOENTRY_DEST_CLASS2 },
+ { ABD, NFIFOENTRY_DEST_DECO }
+};
+
+static const uint32_t nfifo_flags[][2] = {
+/*1*/ { LAST1, NFIFOENTRY_LC1 },
+ { LAST2, NFIFOENTRY_LC2 },
+ { FLUSH1, NFIFOENTRY_FC1 },
+ { BP, NFIFOENTRY_BND },
+ { PAD_ZERO, NFIFOENTRY_PTYPE_ZEROS },
+ { PAD_NONZERO, NFIFOENTRY_PTYPE_RND_NOZEROS },
+ { PAD_INCREMENT, NFIFOENTRY_PTYPE_INCREMENT },
+ { PAD_RANDOM, NFIFOENTRY_PTYPE_RND },
+ { PAD_ZERO_N1, NFIFOENTRY_PTYPE_ZEROS_NZ },
+ { PAD_NONZERO_0, NFIFOENTRY_PTYPE_RND_NZ_LZ },
+ { PAD_N1, NFIFOENTRY_PTYPE_N },
+/*12*/ { PAD_NONZERO_N, NFIFOENTRY_PTYPE_RND_NZ_N },
+ { FLUSH2, NFIFOENTRY_FC2 },
+ { OC, NFIFOENTRY_OC }
+};
+
+/*
+ * Allowed NFIFO LOAD flags for each SEC Era.
+ * Values represent the number of entries from nfifo_flags[] that are supported.
+ */
+static const unsigned int nfifo_flags_sz[] = {12, 14, 14, 14, 14, 14, 14, 14};
+
+static const uint32_t nfifo_pad_flags[][2] = {
+ { BM, NFIFOENTRY_BM },
+ { PS, NFIFOENTRY_PS },
+ { PR, NFIFOENTRY_PR }
+};
+
+/*
+ * Allowed NFIFO LOAD pad flags for each SEC Era.
+ * Values represent the number of entries from nfifo_pad_flags[] that are
+ * supported.
+ */
+static const unsigned int nfifo_pad_flags_sz[] = {2, 2, 2, 2, 3, 3, 3, 3};
+
+static inline int
+rta_nfifo_load(struct program *program, uint32_t src,
+ uint32_t data, uint32_t length, uint32_t flags)
+{
+ uint32_t opcode = 0, val;
+ int ret = -EINVAL;
+ uint32_t load_cmd = CMD_LOAD | LDST_IMM | LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO;
+ unsigned int start_pc = program->current_pc;
+
+ if ((data == AFHA_SBOX) && (rta_sec_era == RTA_SEC_ERA_7)) {
+ pr_err("NFIFO: AFHA S-box not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+
+ /* write source field */
+ ret = __rta_map_opcode(src, nfifo_src, nfifo_src_sz[rta_sec_era], &val);
+ if (ret < 0) {
+ pr_err("NFIFO: Invalid SRC. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+ opcode |= val;
+
+ /* write type field */
+ ret = __rta_map_opcode(data, nfifo_data, ARRAY_SIZE(nfifo_data), &val);
+ if (ret < 0) {
+ pr_err("NFIFO: Invalid data. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+ opcode |= val;
+
+ /* write DL field */
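+	/* (assumption: the 4 / 8 below is the immediate data length of the
+	 * LOAD command -- 4 bytes for the info FIFO entry alone, 8 bytes
+	 * when the extended length word is appended)
+	 */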
+ if (!(flags & EXT)) {
+ opcode |= length & NFIFOENTRY_DLEN_MASK;
+ load_cmd |= 4;
+ } else {
+ load_cmd |= 8;
+ }
+
+ /* write flags */
+ __rta_map_flags(flags, nfifo_flags, nfifo_flags_sz[rta_sec_era],
+ &opcode);
+
+	/* in case of padding, also set the pad-specific flags */
+ if (src == PAD)
+ __rta_map_flags(flags, nfifo_pad_flags,
+ nfifo_pad_flags_sz[rta_sec_era], &opcode);
+
+ /* write LOAD command first */
+ __rta_out32(program, load_cmd);
+ __rta_out32(program, opcode);
+
+ if (flags & EXT)
+ __rta_out32(program, length & NFIFOENTRY_DLEN_MASK);
+
+ program->current_instruction++;
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
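+
+/*
+ * Illustrative usage (a sketch): pad the class 1 message data with 16 zero
+ * bytes and mark it as the last class 1 data:
+ *
+ *	rta_nfifo_load(p, PAD, MSG1, 16, PAD_ZERO | LAST1);
+ */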
+
+#endif /* __RTA_NFIFO_CMD_H__ */
diff --git a/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/operation_cmd.h b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/operation_cmd.h
new file mode 100644
index 00000000..5e88fb46
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/operation_cmd.h
@@ -0,0 +1,599 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __RTA_OPERATION_CMD_H__
+#define __RTA_OPERATION_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+static inline int
+__rta_alg_aai_aes(uint16_t aai)
+{
+ uint16_t aes_mode = aai & OP_ALG_AESA_MODE_MASK;
+
+ if (aai & OP_ALG_AAI_C2K) {
+ if (rta_sec_era < RTA_SEC_ERA_5)
+ return -1;
+ if ((aes_mode != OP_ALG_AAI_CCM) &&
+ (aes_mode != OP_ALG_AAI_GCM))
+ return -EINVAL;
+ }
+
+ switch (aes_mode) {
+ case OP_ALG_AAI_CBC_CMAC:
+ case OP_ALG_AAI_CTR_CMAC_LTE:
+ case OP_ALG_AAI_CTR_CMAC:
+ if (rta_sec_era < RTA_SEC_ERA_2)
+ return -EINVAL;
+ /* no break */
+ case OP_ALG_AAI_CTR:
+ case OP_ALG_AAI_CBC:
+ case OP_ALG_AAI_ECB:
+ case OP_ALG_AAI_OFB:
+ case OP_ALG_AAI_CFB:
+ case OP_ALG_AAI_XTS:
+ case OP_ALG_AAI_CMAC:
+ case OP_ALG_AAI_XCBC_MAC:
+ case OP_ALG_AAI_CCM:
+ case OP_ALG_AAI_GCM:
+ case OP_ALG_AAI_CBC_XCBCMAC:
+ case OP_ALG_AAI_CTR_XCBCMAC:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_des(uint16_t aai)
+{
+ uint16_t aai_code = (uint16_t)(aai & ~OP_ALG_AAI_CHECKODD);
+
+ switch (aai_code) {
+ case OP_ALG_AAI_CBC:
+ case OP_ALG_AAI_ECB:
+ case OP_ALG_AAI_CFB:
+ case OP_ALG_AAI_OFB:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_md5(uint16_t aai)
+{
+ switch (aai) {
+ case OP_ALG_AAI_HMAC:
+ if (rta_sec_era < RTA_SEC_ERA_2)
+ return -EINVAL;
+ /* no break */
+ case OP_ALG_AAI_SMAC:
+ case OP_ALG_AAI_HASH:
+ case OP_ALG_AAI_HMAC_PRECOMP:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_sha(uint16_t aai)
+{
+ switch (aai) {
+ case OP_ALG_AAI_HMAC:
+ if (rta_sec_era < RTA_SEC_ERA_2)
+ return -EINVAL;
+ /* no break */
+ case OP_ALG_AAI_HASH:
+ case OP_ALG_AAI_HMAC_PRECOMP:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_rng(uint16_t aai)
+{
+ uint16_t rng_mode = aai & OP_ALG_RNG_MODE_MASK;
+ uint16_t rng_sh = aai & OP_ALG_AAI_RNG4_SH_MASK;
+
+ switch (rng_mode) {
+ case OP_ALG_AAI_RNG:
+ case OP_ALG_AAI_RNG_NZB:
+ case OP_ALG_AAI_RNG_OBP:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* State Handle bits are valid only for SEC Era >= 5 */
+ if ((rta_sec_era < RTA_SEC_ERA_5) && rng_sh)
+ return -EINVAL;
+
+ /* PS, AI, SK bits are also valid only for SEC Era >= 5 */
+ if ((rta_sec_era < RTA_SEC_ERA_5) && (aai &
+ (OP_ALG_AAI_RNG4_PS | OP_ALG_AAI_RNG4_AI | OP_ALG_AAI_RNG4_SK)))
+ return -EINVAL;
+
+ switch (rng_sh) {
+ case OP_ALG_AAI_RNG4_SH_0:
+ case OP_ALG_AAI_RNG4_SH_1:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_crc(uint16_t aai)
+{
+ uint16_t aai_code = aai & OP_ALG_CRC_POLY_MASK;
+
+ switch (aai_code) {
+ case OP_ALG_AAI_802:
+ case OP_ALG_AAI_3385:
+ case OP_ALG_AAI_CUST_POLY:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_kasumi(uint16_t aai)
+{
+ switch (aai) {
+ case OP_ALG_AAI_GSM:
+ case OP_ALG_AAI_EDGE:
+ case OP_ALG_AAI_F8:
+ case OP_ALG_AAI_F9:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_snow_f9(uint16_t aai)
+{
+ if (aai == OP_ALG_AAI_F9)
+ return 0;
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_snow_f8(uint16_t aai)
+{
+ if (aai == OP_ALG_AAI_F8)
+ return 0;
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_zuce(uint16_t aai)
+{
+ if (aai == OP_ALG_AAI_F8)
+ return 0;
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_alg_aai_zuca(uint16_t aai)
+{
+ if (aai == OP_ALG_AAI_F9)
+ return 0;
+
+ return -EINVAL;
+}
+
+struct alg_aai_map {
+ uint32_t chipher_algo;
+ int (*aai_func)(uint16_t);
+ uint32_t class;
+};
+
+static const struct alg_aai_map alg_table[] = {
+/*1*/ { OP_ALG_ALGSEL_AES, __rta_alg_aai_aes, OP_TYPE_CLASS1_ALG },
+ { OP_ALG_ALGSEL_DES, __rta_alg_aai_des, OP_TYPE_CLASS1_ALG },
+ { OP_ALG_ALGSEL_3DES, __rta_alg_aai_des, OP_TYPE_CLASS1_ALG },
+ { OP_ALG_ALGSEL_MD5, __rta_alg_aai_md5, OP_TYPE_CLASS2_ALG },
+ { OP_ALG_ALGSEL_SHA1, __rta_alg_aai_md5, OP_TYPE_CLASS2_ALG },
+ { OP_ALG_ALGSEL_SHA224, __rta_alg_aai_sha, OP_TYPE_CLASS2_ALG },
+ { OP_ALG_ALGSEL_SHA256, __rta_alg_aai_sha, OP_TYPE_CLASS2_ALG },
+ { OP_ALG_ALGSEL_SHA384, __rta_alg_aai_sha, OP_TYPE_CLASS2_ALG },
+ { OP_ALG_ALGSEL_SHA512, __rta_alg_aai_sha, OP_TYPE_CLASS2_ALG },
+ { OP_ALG_ALGSEL_RNG, __rta_alg_aai_rng, OP_TYPE_CLASS1_ALG },
+/*11*/ { OP_ALG_ALGSEL_CRC, __rta_alg_aai_crc, OP_TYPE_CLASS2_ALG },
+ { OP_ALG_ALGSEL_ARC4, NULL, OP_TYPE_CLASS1_ALG },
+ { OP_ALG_ALGSEL_SNOW_F8, __rta_alg_aai_snow_f8, OP_TYPE_CLASS1_ALG },
+/*14*/ { OP_ALG_ALGSEL_KASUMI, __rta_alg_aai_kasumi, OP_TYPE_CLASS1_ALG },
+ { OP_ALG_ALGSEL_SNOW_F9, __rta_alg_aai_snow_f9, OP_TYPE_CLASS2_ALG },
+ { OP_ALG_ALGSEL_ZUCE, __rta_alg_aai_zuce, OP_TYPE_CLASS1_ALG },
+/*17*/ { OP_ALG_ALGSEL_ZUCA, __rta_alg_aai_zuca, OP_TYPE_CLASS2_ALG }
+};
+
+/*
+ * Allowed OPERATION algorithms for each SEC Era.
+ * Values represent the number of entries from alg_table[] that are supported.
+ */
+static const unsigned int alg_table_sz[] = {14, 15, 15, 15, 17, 17, 11, 17};
+
+static inline int
+rta_operation(struct program *program, uint32_t cipher_algo,
+ uint16_t aai, uint8_t algo_state,
+ int icv_checking, int enc)
+{
+ uint32_t opcode = CMD_OPERATION;
+ unsigned int i, found = 0;
+ unsigned int start_pc = program->current_pc;
+ int ret;
+
+ for (i = 0; i < alg_table_sz[rta_sec_era]; i++) {
+ if (alg_table[i].chipher_algo == cipher_algo) {
+ opcode |= cipher_algo | alg_table[i].class;
+ /* nothing else to verify */
+ if (alg_table[i].aai_func == NULL) {
+ found = 1;
+ break;
+ }
+
+ aai &= OP_ALG_AAI_MASK;
+
+ ret = (*alg_table[i].aai_func)(aai);
+ if (ret < 0) {
+ pr_err("OPERATION: Bad AAI Type. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+ opcode |= aai;
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ pr_err("OPERATION: Invalid Command. SEC Program Line: %d\n",
+ program->current_pc);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ switch (algo_state) {
+ case OP_ALG_AS_UPDATE:
+ case OP_ALG_AS_INIT:
+ case OP_ALG_AS_FINALIZE:
+ case OP_ALG_AS_INITFINAL:
+ opcode |= algo_state;
+ break;
+ default:
+ pr_err("Invalid Operation Command\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ switch (icv_checking) {
+ case ICV_CHECK_DISABLE:
+ /*
+ * opcode |= OP_ALG_ICV_OFF;
+ * OP_ALG_ICV_OFF is 0
+ */
+ break;
+ case ICV_CHECK_ENABLE:
+ opcode |= OP_ALG_ICV_ON;
+ break;
+ default:
+ pr_err("Invalid Operation Command\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ switch (enc) {
+ case DIR_DEC:
+ /*
+ * opcode |= OP_ALG_DECRYPT;
+ * OP_ALG_DECRYPT is 0
+ */
+ break;
+ case DIR_ENC:
+ opcode |= OP_ALG_ENCRYPT;
+ break;
+ default:
+ pr_err("Invalid Operation Command\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ return ret;
+}
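+
+/*
+ * Illustrative usage (a sketch): program a single-pass AES-CBC encrypt
+ * operation with ICV checking disabled:
+ *
+ *	rta_operation(p, OP_ALG_ALGSEL_AES, OP_ALG_AAI_CBC,
+ *		      OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, DIR_ENC);
+ */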
+
+/*
+ * OPERATION PKHA routines
+ */
+static inline int
+__rta_pkha_clearmem(uint32_t pkha_op)
+{
+ switch (pkha_op) {
+ case (OP_ALG_PKMODE_CLEARMEM_ALL):
+ case (OP_ALG_PKMODE_CLEARMEM_ABE):
+ case (OP_ALG_PKMODE_CLEARMEM_ABN):
+ case (OP_ALG_PKMODE_CLEARMEM_AB):
+ case (OP_ALG_PKMODE_CLEARMEM_AEN):
+ case (OP_ALG_PKMODE_CLEARMEM_AE):
+ case (OP_ALG_PKMODE_CLEARMEM_AN):
+ case (OP_ALG_PKMODE_CLEARMEM_A):
+ case (OP_ALG_PKMODE_CLEARMEM_BEN):
+ case (OP_ALG_PKMODE_CLEARMEM_BE):
+ case (OP_ALG_PKMODE_CLEARMEM_BN):
+ case (OP_ALG_PKMODE_CLEARMEM_B):
+ case (OP_ALG_PKMODE_CLEARMEM_EN):
+ case (OP_ALG_PKMODE_CLEARMEM_N):
+ case (OP_ALG_PKMODE_CLEARMEM_E):
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_pkha_mod_arithmetic(uint32_t pkha_op)
+{
+ pkha_op &= (uint32_t)~OP_ALG_PKMODE_OUT_A;
+
+ switch (pkha_op) {
+ case (OP_ALG_PKMODE_MOD_ADD):
+ case (OP_ALG_PKMODE_MOD_SUB_AB):
+ case (OP_ALG_PKMODE_MOD_SUB_BA):
+ case (OP_ALG_PKMODE_MOD_MULT):
+ case (OP_ALG_PKMODE_MOD_MULT_IM):
+ case (OP_ALG_PKMODE_MOD_MULT_IM_OM):
+ case (OP_ALG_PKMODE_MOD_EXPO):
+ case (OP_ALG_PKMODE_MOD_EXPO_TEQ):
+ case (OP_ALG_PKMODE_MOD_EXPO_IM):
+ case (OP_ALG_PKMODE_MOD_EXPO_IM_TEQ):
+ case (OP_ALG_PKMODE_MOD_REDUCT):
+ case (OP_ALG_PKMODE_MOD_INV):
+ case (OP_ALG_PKMODE_MOD_MONT_CNST):
+ case (OP_ALG_PKMODE_MOD_CRT_CNST):
+ case (OP_ALG_PKMODE_MOD_GCD):
+ case (OP_ALG_PKMODE_MOD_PRIMALITY):
+ case (OP_ALG_PKMODE_MOD_SML_EXP):
+ case (OP_ALG_PKMODE_F2M_ADD):
+ case (OP_ALG_PKMODE_F2M_MUL):
+ case (OP_ALG_PKMODE_F2M_MUL_IM):
+ case (OP_ALG_PKMODE_F2M_MUL_IM_OM):
+ case (OP_ALG_PKMODE_F2M_EXP):
+ case (OP_ALG_PKMODE_F2M_EXP_TEQ):
+ case (OP_ALG_PKMODE_F2M_AMODN):
+ case (OP_ALG_PKMODE_F2M_INV):
+ case (OP_ALG_PKMODE_F2M_R2):
+ case (OP_ALG_PKMODE_F2M_GCD):
+ case (OP_ALG_PKMODE_F2M_SML_EXP):
+ case (OP_ALG_PKMODE_ECC_F2M_ADD):
+ case (OP_ALG_PKMODE_ECC_F2M_ADD_IM_OM_PROJ):
+ case (OP_ALG_PKMODE_ECC_F2M_DBL):
+ case (OP_ALG_PKMODE_ECC_F2M_DBL_IM_OM_PROJ):
+ case (OP_ALG_PKMODE_ECC_F2M_MUL):
+ case (OP_ALG_PKMODE_ECC_F2M_MUL_TEQ):
+ case (OP_ALG_PKMODE_ECC_F2M_MUL_R2):
+ case (OP_ALG_PKMODE_ECC_F2M_MUL_R2_TEQ):
+ case (OP_ALG_PKMODE_ECC_F2M_MUL_R2_PROJ):
+ case (OP_ALG_PKMODE_ECC_F2M_MUL_R2_PROJ_TEQ):
+ case (OP_ALG_PKMODE_ECC_MOD_ADD):
+ case (OP_ALG_PKMODE_ECC_MOD_ADD_IM_OM_PROJ):
+ case (OP_ALG_PKMODE_ECC_MOD_DBL):
+ case (OP_ALG_PKMODE_ECC_MOD_DBL_IM_OM_PROJ):
+ case (OP_ALG_PKMODE_ECC_MOD_MUL):
+ case (OP_ALG_PKMODE_ECC_MOD_MUL_TEQ):
+ case (OP_ALG_PKMODE_ECC_MOD_MUL_R2):
+ case (OP_ALG_PKMODE_ECC_MOD_MUL_R2_TEQ):
+ case (OP_ALG_PKMODE_ECC_MOD_MUL_R2_PROJ):
+ case (OP_ALG_PKMODE_ECC_MOD_MUL_R2_PROJ_TEQ):
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_pkha_copymem(uint32_t pkha_op)
+{
+ switch (pkha_op) {
+ case (OP_ALG_PKMODE_COPY_NSZ_A0_B0):
+ case (OP_ALG_PKMODE_COPY_NSZ_A0_B1):
+ case (OP_ALG_PKMODE_COPY_NSZ_A0_B2):
+ case (OP_ALG_PKMODE_COPY_NSZ_A0_B3):
+ case (OP_ALG_PKMODE_COPY_NSZ_A1_B0):
+ case (OP_ALG_PKMODE_COPY_NSZ_A1_B1):
+ case (OP_ALG_PKMODE_COPY_NSZ_A1_B2):
+ case (OP_ALG_PKMODE_COPY_NSZ_A1_B3):
+ case (OP_ALG_PKMODE_COPY_NSZ_A2_B0):
+ case (OP_ALG_PKMODE_COPY_NSZ_A2_B1):
+ case (OP_ALG_PKMODE_COPY_NSZ_A2_B2):
+ case (OP_ALG_PKMODE_COPY_NSZ_A2_B3):
+ case (OP_ALG_PKMODE_COPY_NSZ_A3_B0):
+ case (OP_ALG_PKMODE_COPY_NSZ_A3_B1):
+ case (OP_ALG_PKMODE_COPY_NSZ_A3_B2):
+ case (OP_ALG_PKMODE_COPY_NSZ_A3_B3):
+ case (OP_ALG_PKMODE_COPY_NSZ_B0_A0):
+ case (OP_ALG_PKMODE_COPY_NSZ_B0_A1):
+ case (OP_ALG_PKMODE_COPY_NSZ_B0_A2):
+ case (OP_ALG_PKMODE_COPY_NSZ_B0_A3):
+ case (OP_ALG_PKMODE_COPY_NSZ_B1_A0):
+ case (OP_ALG_PKMODE_COPY_NSZ_B1_A1):
+ case (OP_ALG_PKMODE_COPY_NSZ_B1_A2):
+ case (OP_ALG_PKMODE_COPY_NSZ_B1_A3):
+ case (OP_ALG_PKMODE_COPY_NSZ_B2_A0):
+ case (OP_ALG_PKMODE_COPY_NSZ_B2_A1):
+ case (OP_ALG_PKMODE_COPY_NSZ_B2_A2):
+ case (OP_ALG_PKMODE_COPY_NSZ_B2_A3):
+ case (OP_ALG_PKMODE_COPY_NSZ_B3_A0):
+ case (OP_ALG_PKMODE_COPY_NSZ_B3_A1):
+ case (OP_ALG_PKMODE_COPY_NSZ_B3_A2):
+ case (OP_ALG_PKMODE_COPY_NSZ_B3_A3):
+ case (OP_ALG_PKMODE_COPY_NSZ_A_E):
+ case (OP_ALG_PKMODE_COPY_NSZ_A_N):
+ case (OP_ALG_PKMODE_COPY_NSZ_B_E):
+ case (OP_ALG_PKMODE_COPY_NSZ_B_N):
+ case (OP_ALG_PKMODE_COPY_NSZ_N_A):
+ case (OP_ALG_PKMODE_COPY_NSZ_N_B):
+ case (OP_ALG_PKMODE_COPY_NSZ_N_E):
+ case (OP_ALG_PKMODE_COPY_SSZ_A0_B0):
+ case (OP_ALG_PKMODE_COPY_SSZ_A0_B1):
+ case (OP_ALG_PKMODE_COPY_SSZ_A0_B2):
+ case (OP_ALG_PKMODE_COPY_SSZ_A0_B3):
+ case (OP_ALG_PKMODE_COPY_SSZ_A1_B0):
+ case (OP_ALG_PKMODE_COPY_SSZ_A1_B1):
+ case (OP_ALG_PKMODE_COPY_SSZ_A1_B2):
+ case (OP_ALG_PKMODE_COPY_SSZ_A1_B3):
+ case (OP_ALG_PKMODE_COPY_SSZ_A2_B0):
+ case (OP_ALG_PKMODE_COPY_SSZ_A2_B1):
+ case (OP_ALG_PKMODE_COPY_SSZ_A2_B2):
+ case (OP_ALG_PKMODE_COPY_SSZ_A2_B3):
+ case (OP_ALG_PKMODE_COPY_SSZ_A3_B0):
+ case (OP_ALG_PKMODE_COPY_SSZ_A3_B1):
+ case (OP_ALG_PKMODE_COPY_SSZ_A3_B2):
+ case (OP_ALG_PKMODE_COPY_SSZ_A3_B3):
+ case (OP_ALG_PKMODE_COPY_SSZ_B0_A0):
+ case (OP_ALG_PKMODE_COPY_SSZ_B0_A1):
+ case (OP_ALG_PKMODE_COPY_SSZ_B0_A2):
+ case (OP_ALG_PKMODE_COPY_SSZ_B0_A3):
+ case (OP_ALG_PKMODE_COPY_SSZ_B1_A0):
+ case (OP_ALG_PKMODE_COPY_SSZ_B1_A1):
+ case (OP_ALG_PKMODE_COPY_SSZ_B1_A2):
+ case (OP_ALG_PKMODE_COPY_SSZ_B1_A3):
+ case (OP_ALG_PKMODE_COPY_SSZ_B2_A0):
+ case (OP_ALG_PKMODE_COPY_SSZ_B2_A1):
+ case (OP_ALG_PKMODE_COPY_SSZ_B2_A2):
+ case (OP_ALG_PKMODE_COPY_SSZ_B2_A3):
+ case (OP_ALG_PKMODE_COPY_SSZ_B3_A0):
+ case (OP_ALG_PKMODE_COPY_SSZ_B3_A1):
+ case (OP_ALG_PKMODE_COPY_SSZ_B3_A2):
+ case (OP_ALG_PKMODE_COPY_SSZ_B3_A3):
+ case (OP_ALG_PKMODE_COPY_SSZ_A_E):
+ case (OP_ALG_PKMODE_COPY_SSZ_A_N):
+ case (OP_ALG_PKMODE_COPY_SSZ_B_E):
+ case (OP_ALG_PKMODE_COPY_SSZ_B_N):
+ case (OP_ALG_PKMODE_COPY_SSZ_N_A):
+ case (OP_ALG_PKMODE_COPY_SSZ_N_B):
+ case (OP_ALG_PKMODE_COPY_SSZ_N_E):
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+rta_pkha_operation(struct program *program, uint32_t op_pkha)
+{
+ uint32_t opcode = CMD_OPERATION | OP_TYPE_PK | OP_ALG_PK;
+ uint32_t pkha_func;
+ unsigned int start_pc = program->current_pc;
+ int ret = -EINVAL;
+
+ pkha_func = op_pkha & OP_ALG_PK_FUN_MASK;
+
+ switch (pkha_func) {
+ case (OP_ALG_PKMODE_CLEARMEM):
+ ret = __rta_pkha_clearmem(op_pkha);
+ if (ret < 0) {
+ pr_err("OPERATION PKHA: Type not supported. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+ break;
+ case (OP_ALG_PKMODE_MOD_ADD):
+ case (OP_ALG_PKMODE_MOD_SUB_AB):
+ case (OP_ALG_PKMODE_MOD_SUB_BA):
+ case (OP_ALG_PKMODE_MOD_MULT):
+ case (OP_ALG_PKMODE_MOD_EXPO):
+ case (OP_ALG_PKMODE_MOD_REDUCT):
+ case (OP_ALG_PKMODE_MOD_INV):
+ case (OP_ALG_PKMODE_MOD_MONT_CNST):
+ case (OP_ALG_PKMODE_MOD_CRT_CNST):
+ case (OP_ALG_PKMODE_MOD_GCD):
+ case (OP_ALG_PKMODE_MOD_PRIMALITY):
+ case (OP_ALG_PKMODE_MOD_SML_EXP):
+ case (OP_ALG_PKMODE_ECC_MOD_ADD):
+ case (OP_ALG_PKMODE_ECC_MOD_DBL):
+ case (OP_ALG_PKMODE_ECC_MOD_MUL):
+ ret = __rta_pkha_mod_arithmetic(op_pkha);
+ if (ret < 0) {
+ pr_err("OPERATION PKHA: Type not supported. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+ break;
+ case (OP_ALG_PKMODE_COPY_NSZ):
+ case (OP_ALG_PKMODE_COPY_SSZ):
+ ret = __rta_pkha_copymem(op_pkha);
+ if (ret < 0) {
+ pr_err("OPERATION PKHA: Type not supported. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+ break;
+ default:
+ pr_err("Invalid Operation Command\n");
+ goto err;
+ }
+
+ opcode |= op_pkha;
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
+
+#endif /* __RTA_OPERATION_CMD_H__ */
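For orientation, a minimal sketch of driving rta_operation() above, assuming a struct program prg already set up with rta_program_cntxt_init() and the OP_ALG_* / ICV_* / DIR_* constants coming from hw/desc.h:

    /* Emit a class-1 DES-CBC encrypt OPERATION word; rta_operation()
     * returns the word's offset within the descriptor, or a negative
     * errno if the algorithm/AAI pair is invalid for the current
     * rta_sec_era.
     */
    int off = rta_operation(&prg, OP_ALG_ALGSEL_DES, OP_ALG_AAI_CBC,
                            OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE, DIR_ENC);
    if (off < 0)
            return off;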
diff --git a/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/protocol_cmd.h b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/protocol_cmd.h
new file mode 100644
index 00000000..2e7b2f2d
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/protocol_cmd.h
@@ -0,0 +1,732 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __RTA_PROTOCOL_CMD_H__
+#define __RTA_PROTOCOL_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+static inline int
+__rta_ssl_proto(uint16_t protoinfo)
+{
+ switch (protoinfo) {
+ case OP_PCL_SSL30_RC4_40_MD5_2:
+ case OP_PCL_SSL30_RC4_128_MD5_2:
+ case OP_PCL_SSL30_RC4_128_SHA_5:
+ case OP_PCL_SSL30_RC4_40_MD5_3:
+ case OP_PCL_SSL30_RC4_128_MD5_3:
+ case OP_PCL_SSL30_RC4_128_SHA:
+ case OP_PCL_SSL30_RC4_128_MD5:
+ case OP_PCL_SSL30_RC4_40_SHA:
+ case OP_PCL_SSL30_RC4_40_MD5:
+ case OP_PCL_SSL30_RC4_128_SHA_2:
+ case OP_PCL_SSL30_RC4_128_SHA_3:
+ case OP_PCL_SSL30_RC4_128_SHA_4:
+ case OP_PCL_SSL30_RC4_128_SHA_6:
+ case OP_PCL_SSL30_RC4_128_SHA_7:
+ case OP_PCL_SSL30_RC4_128_SHA_8:
+ case OP_PCL_SSL30_RC4_128_SHA_9:
+ case OP_PCL_SSL30_RC4_128_SHA_10:
+ case OP_PCL_TLS_ECDHE_PSK_RC4_128_SHA:
+ if (rta_sec_era == RTA_SEC_ERA_7)
+ return -EINVAL;
+ /* fall through if not Era 7 */
+ case OP_PCL_SSL30_DES40_CBC_SHA:
+ case OP_PCL_SSL30_DES_CBC_SHA_2:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_5:
+ case OP_PCL_SSL30_DES40_CBC_SHA_2:
+ case OP_PCL_SSL30_DES_CBC_SHA_3:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_6:
+ case OP_PCL_SSL30_DES40_CBC_SHA_3:
+ case OP_PCL_SSL30_DES_CBC_SHA_4:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_7:
+ case OP_PCL_SSL30_DES40_CBC_SHA_4:
+ case OP_PCL_SSL30_DES_CBC_SHA_5:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_8:
+ case OP_PCL_SSL30_DES40_CBC_SHA_5:
+ case OP_PCL_SSL30_DES_CBC_SHA_6:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_9:
+ case OP_PCL_SSL30_DES40_CBC_SHA_6:
+ case OP_PCL_SSL30_DES_CBC_SHA_7:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_10:
+ case OP_PCL_SSL30_DES_CBC_SHA:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA:
+ case OP_PCL_SSL30_DES_CBC_MD5:
+ case OP_PCL_SSL30_3DES_EDE_CBC_MD5:
+ case OP_PCL_SSL30_DES40_CBC_SHA_7:
+ case OP_PCL_SSL30_DES40_CBC_MD5:
+ case OP_PCL_SSL30_AES_128_CBC_SHA:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_2:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_3:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_4:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_5:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_6:
+ case OP_PCL_SSL30_AES_256_CBC_SHA:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_2:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_3:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_4:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_5:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_6:
+ case OP_PCL_TLS12_AES_128_CBC_SHA256_2:
+ case OP_PCL_TLS12_AES_128_CBC_SHA256_3:
+ case OP_PCL_TLS12_AES_128_CBC_SHA256_4:
+ case OP_PCL_TLS12_AES_128_CBC_SHA256_5:
+ case OP_PCL_TLS12_AES_256_CBC_SHA256_2:
+ case OP_PCL_TLS12_AES_256_CBC_SHA256_3:
+ case OP_PCL_TLS12_AES_256_CBC_SHA256_4:
+ case OP_PCL_TLS12_AES_256_CBC_SHA256_5:
+ case OP_PCL_TLS12_AES_128_CBC_SHA256_6:
+ case OP_PCL_TLS12_AES_256_CBC_SHA256_6:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_2:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_7:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_7:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_3:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_8:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_8:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_4:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_9:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_9:
+ case OP_PCL_SSL30_AES_128_GCM_SHA256_1:
+ case OP_PCL_SSL30_AES_256_GCM_SHA384_1:
+ case OP_PCL_SSL30_AES_128_GCM_SHA256_2:
+ case OP_PCL_SSL30_AES_256_GCM_SHA384_2:
+ case OP_PCL_SSL30_AES_128_GCM_SHA256_3:
+ case OP_PCL_SSL30_AES_256_GCM_SHA384_3:
+ case OP_PCL_SSL30_AES_128_GCM_SHA256_4:
+ case OP_PCL_SSL30_AES_256_GCM_SHA384_4:
+ case OP_PCL_SSL30_AES_128_GCM_SHA256_5:
+ case OP_PCL_SSL30_AES_256_GCM_SHA384_5:
+ case OP_PCL_SSL30_AES_128_GCM_SHA256_6:
+ case OP_PCL_TLS_DH_ANON_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_PSK_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_PSK_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_DHE_PSK_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_DHE_PSK_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_RSA_PSK_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_RSA_PSK_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_PSK_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_PSK_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_DHE_PSK_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_DHE_PSK_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_RSA_PSK_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_RSA_PSK_AES_256_CBC_SHA384:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_11:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_10:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_10:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_12:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_11:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_11:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_12:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_13:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_12:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_14:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_13:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_13:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_15:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_14:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_14:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_16:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_17:
+ case OP_PCL_SSL30_3DES_EDE_CBC_SHA_18:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_15:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_16:
+ case OP_PCL_SSL30_AES_128_CBC_SHA_17:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_15:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_16:
+ case OP_PCL_SSL30_AES_256_CBC_SHA_17:
+ case OP_PCL_TLS_ECDHE_ECDSA_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_ECDHE_ECDSA_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_ECDH_ECDSA_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_ECDH_ECDSA_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_ECDHE_RSA_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_ECDHE_RSA_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_ECDH_RSA_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_ECDH_RSA_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_ECDHE_ECDSA_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_ECDHE_ECDSA_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_ECDH_ECDSA_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_ECDH_ECDSA_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_ECDHE_RSA_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_ECDHE_RSA_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_ECDH_RSA_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_ECDH_RSA_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_ECDHE_PSK_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_PSK_AES_128_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_PSK_AES_256_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_PSK_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_ECDHE_PSK_AES_256_CBC_SHA384:
+ case OP_PCL_TLS12_3DES_EDE_CBC_MD5:
+ case OP_PCL_TLS12_3DES_EDE_CBC_SHA160:
+ case OP_PCL_TLS12_3DES_EDE_CBC_SHA224:
+ case OP_PCL_TLS12_3DES_EDE_CBC_SHA256:
+ case OP_PCL_TLS12_3DES_EDE_CBC_SHA384:
+ case OP_PCL_TLS12_3DES_EDE_CBC_SHA512:
+ case OP_PCL_TLS12_AES_128_CBC_SHA160:
+ case OP_PCL_TLS12_AES_128_CBC_SHA224:
+ case OP_PCL_TLS12_AES_128_CBC_SHA256:
+ case OP_PCL_TLS12_AES_128_CBC_SHA384:
+ case OP_PCL_TLS12_AES_128_CBC_SHA512:
+ case OP_PCL_TLS12_AES_192_CBC_SHA160:
+ case OP_PCL_TLS12_AES_192_CBC_SHA224:
+ case OP_PCL_TLS12_AES_192_CBC_SHA256:
+ case OP_PCL_TLS12_AES_192_CBC_SHA512:
+ case OP_PCL_TLS12_AES_256_CBC_SHA160:
+ case OP_PCL_TLS12_AES_256_CBC_SHA224:
+ case OP_PCL_TLS12_AES_256_CBC_SHA256:
+ case OP_PCL_TLS12_AES_256_CBC_SHA384:
+ case OP_PCL_TLS12_AES_256_CBC_SHA512:
+ case OP_PCL_TLS_PVT_AES_192_CBC_SHA160:
+ case OP_PCL_TLS_PVT_AES_192_CBC_SHA384:
+ case OP_PCL_TLS_PVT_AES_192_CBC_SHA224:
+ case OP_PCL_TLS_PVT_AES_192_CBC_SHA512:
+ case OP_PCL_TLS_PVT_AES_192_CBC_SHA256:
+ case OP_PCL_TLS_PVT_MASTER_SECRET_PRF_FE:
+ case OP_PCL_TLS_PVT_MASTER_SECRET_PRF_FF:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_ike_proto(uint16_t protoinfo)
+{
+ switch (protoinfo) {
+ case OP_PCL_IKE_HMAC_MD5:
+ case OP_PCL_IKE_HMAC_SHA1:
+ case OP_PCL_IKE_HMAC_AES128_CBC:
+ case OP_PCL_IKE_HMAC_SHA256:
+ case OP_PCL_IKE_HMAC_SHA384:
+ case OP_PCL_IKE_HMAC_SHA512:
+ case OP_PCL_IKE_HMAC_AES128_CMAC:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_ipsec_proto(uint16_t protoinfo)
+{
+ uint16_t proto_cls1 = protoinfo & OP_PCL_IPSEC_CIPHER_MASK;
+ uint16_t proto_cls2 = protoinfo & OP_PCL_IPSEC_AUTH_MASK;
+
+ switch (proto_cls1) {
+ case OP_PCL_IPSEC_AES_NULL_WITH_GMAC:
+ if (rta_sec_era < RTA_SEC_ERA_2)
+ return -EINVAL;
+ /* no break */
+ case OP_PCL_IPSEC_AES_CCM8:
+ case OP_PCL_IPSEC_AES_CCM12:
+ case OP_PCL_IPSEC_AES_CCM16:
+ case OP_PCL_IPSEC_AES_GCM8:
+ case OP_PCL_IPSEC_AES_GCM12:
+ case OP_PCL_IPSEC_AES_GCM16:
+ /* CCM, GCM, GMAC require PROTINFO[7:0] = 0 */
+ if (proto_cls2 == OP_PCL_IPSEC_HMAC_NULL)
+ return 0;
+ return -EINVAL;
+ case OP_PCL_IPSEC_NULL:
+ if (rta_sec_era < RTA_SEC_ERA_2)
+ return -EINVAL;
+ /* no break */
+ case OP_PCL_IPSEC_DES_IV64:
+ case OP_PCL_IPSEC_DES:
+ case OP_PCL_IPSEC_3DES:
+ case OP_PCL_IPSEC_AES_CBC:
+ case OP_PCL_IPSEC_AES_CTR:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (proto_cls2) {
+ case OP_PCL_IPSEC_HMAC_NULL:
+ case OP_PCL_IPSEC_HMAC_MD5_96:
+ case OP_PCL_IPSEC_HMAC_SHA1_96:
+ case OP_PCL_IPSEC_AES_XCBC_MAC_96:
+ case OP_PCL_IPSEC_HMAC_MD5_128:
+ case OP_PCL_IPSEC_HMAC_SHA1_160:
+ case OP_PCL_IPSEC_AES_CMAC_96:
+ case OP_PCL_IPSEC_HMAC_SHA2_256_128:
+ case OP_PCL_IPSEC_HMAC_SHA2_384_192:
+ case OP_PCL_IPSEC_HMAC_SHA2_512_256:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_srtp_proto(uint16_t protoinfo)
+{
+ uint16_t proto_cls1 = protoinfo & OP_PCL_SRTP_CIPHER_MASK;
+ uint16_t proto_cls2 = protoinfo & OP_PCL_SRTP_AUTH_MASK;
+
+ switch (proto_cls1) {
+ case OP_PCL_SRTP_AES_CTR:
+ switch (proto_cls2) {
+ case OP_PCL_SRTP_HMAC_SHA1_160:
+ return 0;
+ }
+ /* no break */
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_macsec_proto(uint16_t protoinfo)
+{
+ switch (protoinfo) {
+ case OP_PCL_MACSEC:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_wifi_proto(uint16_t protoinfo)
+{
+ switch (protoinfo) {
+ case OP_PCL_WIFI:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_wimax_proto(uint16_t protoinfo)
+{
+ switch (protoinfo) {
+ case OP_PCL_WIMAX_OFDM:
+ case OP_PCL_WIMAX_OFDMA:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/* Allowed blob proto flags for each SEC Era */
+static const uint32_t proto_blob_flags[] = {
+ OP_PCL_BLOB_FORMAT_MASK | OP_PCL_BLOB_BLACK,
+ OP_PCL_BLOB_FORMAT_MASK | OP_PCL_BLOB_BLACK | OP_PCL_BLOB_TKEK |
+ OP_PCL_BLOB_EKT | OP_PCL_BLOB_REG_MASK,
+ OP_PCL_BLOB_FORMAT_MASK | OP_PCL_BLOB_BLACK | OP_PCL_BLOB_TKEK |
+ OP_PCL_BLOB_EKT | OP_PCL_BLOB_REG_MASK,
+ OP_PCL_BLOB_FORMAT_MASK | OP_PCL_BLOB_BLACK | OP_PCL_BLOB_TKEK |
+ OP_PCL_BLOB_EKT | OP_PCL_BLOB_REG_MASK | OP_PCL_BLOB_SEC_MEM,
+ OP_PCL_BLOB_FORMAT_MASK | OP_PCL_BLOB_BLACK | OP_PCL_BLOB_TKEK |
+ OP_PCL_BLOB_EKT | OP_PCL_BLOB_REG_MASK | OP_PCL_BLOB_SEC_MEM
+};
+
+static inline int
+__rta_blob_proto(uint16_t protoinfo)
+{
+ if (protoinfo & ~proto_blob_flags[rta_sec_era])
+ return -EINVAL;
+
+ switch (protoinfo & OP_PCL_BLOB_FORMAT_MASK) {
+ case OP_PCL_BLOB_FORMAT_NORMAL:
+ case OP_PCL_BLOB_FORMAT_MASTER_VER:
+ case OP_PCL_BLOB_FORMAT_TEST:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (protoinfo & OP_PCL_BLOB_REG_MASK) {
+ case OP_PCL_BLOB_AFHA_SBOX:
+ if (rta_sec_era < RTA_SEC_ERA_3)
+ return -EINVAL;
+ /* no break */
+ case OP_PCL_BLOB_REG_MEMORY:
+ case OP_PCL_BLOB_REG_KEY1:
+ case OP_PCL_BLOB_REG_KEY2:
+ case OP_PCL_BLOB_REG_SPLIT:
+ case OP_PCL_BLOB_REG_PKE:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_dlc_proto(uint16_t protoinfo)
+{
+ if ((rta_sec_era < RTA_SEC_ERA_2) &&
+ (protoinfo & (OP_PCL_PKPROT_DSA_MSG | OP_PCL_PKPROT_HASH_MASK |
+ OP_PCL_PKPROT_EKT_Z | OP_PCL_PKPROT_DECRYPT_Z |
+ OP_PCL_PKPROT_DECRYPT_PRI)))
+ return -EINVAL;
+
+ switch (protoinfo & OP_PCL_PKPROT_HASH_MASK) {
+ case OP_PCL_PKPROT_HASH_MD5:
+ case OP_PCL_PKPROT_HASH_SHA1:
+ case OP_PCL_PKPROT_HASH_SHA224:
+ case OP_PCL_PKPROT_HASH_SHA256:
+ case OP_PCL_PKPROT_HASH_SHA384:
+ case OP_PCL_PKPROT_HASH_SHA512:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline int
+__rta_rsa_enc_proto(uint16_t protoinfo)
+{
+ switch (protoinfo & OP_PCL_RSAPROT_OP_MASK) {
+ case OP_PCL_RSAPROT_OP_ENC_F_IN:
+ if ((protoinfo & OP_PCL_RSAPROT_FFF_MASK) !=
+ OP_PCL_RSAPROT_FFF_RED)
+ return -EINVAL;
+ break;
+ case OP_PCL_RSAPROT_OP_ENC_F_OUT:
+ switch (protoinfo & OP_PCL_RSAPROT_FFF_MASK) {
+ case OP_PCL_RSAPROT_FFF_RED:
+ case OP_PCL_RSAPROT_FFF_ENC:
+ case OP_PCL_RSAPROT_FFF_EKT:
+ case OP_PCL_RSAPROT_FFF_TK_ENC:
+ case OP_PCL_RSAPROT_FFF_TK_EKT:
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline int
+__rta_rsa_dec_proto(uint16_t protoinfo)
+{
+ switch (protoinfo & OP_PCL_RSAPROT_OP_MASK) {
+ case OP_PCL_RSAPROT_OP_DEC_ND:
+ case OP_PCL_RSAPROT_OP_DEC_PQD:
+ case OP_PCL_RSAPROT_OP_DEC_PQDPDQC:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (protoinfo & OP_PCL_RSAPROT_PPP_MASK) {
+ case OP_PCL_RSAPROT_PPP_RED:
+ case OP_PCL_RSAPROT_PPP_ENC:
+ case OP_PCL_RSAPROT_PPP_EKT:
+ case OP_PCL_RSAPROT_PPP_TK_ENC:
+ case OP_PCL_RSAPROT_PPP_TK_EKT:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (protoinfo & OP_PCL_RSAPROT_FMT_PKCSV15)
+ switch (protoinfo & OP_PCL_RSAPROT_FFF_MASK) {
+ case OP_PCL_RSAPROT_FFF_RED:
+ case OP_PCL_RSAPROT_FFF_ENC:
+ case OP_PCL_RSAPROT_FFF_EKT:
+ case OP_PCL_RSAPROT_FFF_TK_ENC:
+ case OP_PCL_RSAPROT_FFF_TK_EKT:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * DKP Protocol - Restrictions on key (SRC,DST) combinations
+ * E.g. key_in_out[0][0] = 1 means the (SRC=IMM, DST=IMM) combination is allowed
+ */
+static const uint8_t key_in_out[4][4] = { {1, 0, 0, 0},
+ {1, 1, 1, 1},
+ {1, 0, 1, 0},
+ {1, 0, 0, 1} };
+
+static inline int
+__rta_dkp_proto(uint16_t protoinfo)
+{
+ int key_src = (protoinfo & OP_PCL_DKP_SRC_MASK) >> OP_PCL_DKP_SRC_SHIFT;
+ int key_dst = (protoinfo & OP_PCL_DKP_DST_MASK) >> OP_PCL_DKP_DST_SHIFT;
+
+ if (!key_in_out[key_src][key_dst]) {
+ pr_err("PROTO_DESC: Invalid DKP key (SRC,DST)\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+
+static inline int
+__rta_3g_dcrc_proto(uint16_t protoinfo)
+{
+ if (rta_sec_era == RTA_SEC_ERA_7)
+ return -EINVAL;
+
+ switch (protoinfo) {
+ case OP_PCL_3G_DCRC_CRC7:
+ case OP_PCL_3G_DCRC_CRC11:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_3g_rlc_proto(uint16_t protoinfo)
+{
+ if (rta_sec_era == RTA_SEC_ERA_7)
+ return -EINVAL;
+
+ switch (protoinfo) {
+ case OP_PCL_3G_RLC_NULL:
+ case OP_PCL_3G_RLC_KASUMI:
+ case OP_PCL_3G_RLC_SNOW:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_lte_pdcp_proto(uint16_t protoinfo)
+{
+ if (rta_sec_era == RTA_SEC_ERA_7)
+ return -EINVAL;
+
+ switch (protoinfo) {
+ case OP_PCL_LTE_ZUC:
+ if (rta_sec_era < RTA_SEC_ERA_5)
+ break;
+ /* no break - ZUC is valid from SEC Era 5 onwards */
+ case OP_PCL_LTE_NULL:
+ case OP_PCL_LTE_SNOW:
+ case OP_PCL_LTE_AES:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline int
+__rta_lte_pdcp_mixed_proto(uint16_t protoinfo)
+{
+ switch (protoinfo & OP_PCL_LTE_MIXED_AUTH_MASK) {
+ case OP_PCL_LTE_MIXED_AUTH_NULL:
+ case OP_PCL_LTE_MIXED_AUTH_SNOW:
+ case OP_PCL_LTE_MIXED_AUTH_AES:
+ case OP_PCL_LTE_MIXED_AUTH_ZUC:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (protoinfo & OP_PCL_LTE_MIXED_ENC_MASK) {
+ case OP_PCL_LTE_MIXED_ENC_NULL:
+ case OP_PCL_LTE_MIXED_ENC_SNOW:
+ case OP_PCL_LTE_MIXED_ENC_AES:
+ case OP_PCL_LTE_MIXED_ENC_ZUC:
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+struct proto_map {
+ uint32_t optype;
+ uint32_t protid;
+ int (*protoinfo_func)(uint16_t);
+};
+
+static const struct proto_map proto_table[] = {
+/*1*/ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_SSL30_PRF, __rta_ssl_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_TLS10_PRF, __rta_ssl_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_TLS11_PRF, __rta_ssl_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_TLS12_PRF, __rta_ssl_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DTLS10_PRF, __rta_ssl_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_IKEV1_PRF, __rta_ike_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_IKEV2_PRF, __rta_ike_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_PUBLICKEYPAIR, __rta_dlc_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DSASIGN, __rta_dlc_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DSAVERIFY, __rta_dlc_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_IPSEC, __rta_ipsec_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_SRTP, __rta_srtp_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_SSL30, __rta_ssl_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_TLS10, __rta_ssl_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_TLS11, __rta_ssl_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_TLS12, __rta_ssl_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_DTLS10, __rta_ssl_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_MACSEC, __rta_macsec_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_WIFI, __rta_wifi_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_WIMAX, __rta_wimax_proto},
+/*21*/ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_BLOB, __rta_blob_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DIFFIEHELLMAN, __rta_dlc_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_RSAENCRYPT, __rta_rsa_enc_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_RSADECRYPT, __rta_rsa_dec_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_3G_DCRC, __rta_3g_dcrc_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_3G_RLC_PDU, __rta_3g_rlc_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_3G_RLC_SDU, __rta_3g_rlc_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_LTE_PDCP_USER, __rta_lte_pdcp_proto},
+/*29*/ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_LTE_PDCP_CTRL, __rta_lte_pdcp_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DKP_MD5, __rta_dkp_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DKP_SHA1, __rta_dkp_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DKP_SHA224, __rta_dkp_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DKP_SHA256, __rta_dkp_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DKP_SHA384, __rta_dkp_proto},
+/*35*/ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DKP_SHA512, __rta_dkp_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_PUBLICKEYPAIR, __rta_dlc_proto},
+/*37*/ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_DSASIGN, __rta_dlc_proto},
+/*38*/ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_LTE_PDCP_CTRL_MIXED,
+ __rta_lte_pdcp_mixed_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_IPSEC_NEW, __rta_ipsec_proto},
+};
+
+/*
+ * Allowed OPERATION protocols for each SEC Era.
+ * Values represent the number of entries from proto_table[] that are supported.
+ */
+static const unsigned int proto_table_sz[] = {21, 29, 29, 29, 29, 35, 37, 39};
+
+static inline int
+rta_proto_operation(struct program *program, uint32_t optype,
+ uint32_t protid, uint16_t protoinfo)
+{
+ uint32_t opcode = CMD_OPERATION;
+ unsigned int i, found = 0;
+ uint32_t optype_tmp = optype;
+ unsigned int start_pc = program->current_pc;
+ int ret = -EINVAL;
+
+ for (i = 0; i < proto_table_sz[rta_sec_era]; i++) {
+ /* clear last bit in optype so that encap requests also match decap entries */
+ optype_tmp &= (uint32_t)~(1 << OP_TYPE_SHIFT);
+ if (optype_tmp == proto_table[i].optype) {
+ if (proto_table[i].protid == protid) {
+ /* nothing else to verify */
+ if (proto_table[i].protoinfo_func == NULL) {
+ found = 1;
+ break;
+ }
+ /* check protoinfo */
+ ret = (*proto_table[i].protoinfo_func)
+ (protoinfo);
+ if (ret < 0) {
+ pr_err("PROTO_DESC: Bad PROTO Type. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+ found = 1;
+ break;
+ }
+ }
+ }
+ if (!found) {
+ pr_err("PROTO_DESC: Operation Type Mismatch. SEC Program Line: %d\n",
+ program->current_pc);
+ goto err;
+ }
+
+ __rta_out32(program, opcode | optype | protid | protoinfo);
+ program->current_instruction++;
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
+
+static inline int
+rta_dkp_proto(struct program *program, uint32_t protid,
+ uint16_t key_src, uint16_t key_dst,
+ uint16_t keylen, uint64_t key,
+ enum rta_data_type key_type)
+{
+ unsigned int start_pc = program->current_pc;
+ unsigned int in_words = 0, out_words = 0;
+ int ret;
+
+ key_src &= OP_PCL_DKP_SRC_MASK;
+ key_dst &= OP_PCL_DKP_DST_MASK;
+ keylen &= OP_PCL_DKP_KEY_MASK;
+
+ ret = rta_proto_operation(program, OP_TYPE_UNI_PROTOCOL, protid,
+ key_src | key_dst | keylen);
+ if (ret < 0)
+ return ret;
+
+ if ((key_src == OP_PCL_DKP_SRC_PTR) ||
+ (key_src == OP_PCL_DKP_SRC_SGF)) {
+ __rta_out64(program, program->ps, key);
+ in_words = program->ps ? 2 : 1;
+ } else if (key_src == OP_PCL_DKP_SRC_IMM) {
+ __rta_inline_data(program, key, inline_flags(key_type), keylen);
+ in_words = (unsigned int)((keylen + 3) / 4);
+ }
+
+ if ((key_dst == OP_PCL_DKP_DST_PTR) ||
+ (key_dst == OP_PCL_DKP_DST_SGF)) {
+ out_words = in_words;
+ } else if (key_dst == OP_PCL_DKP_DST_IMM) {
+ out_words = split_key_len(protid) / 4;
+ }
+
+ if (out_words < in_words) {
+ pr_err("PROTO_DESC: DKP doesn't currently support a smaller descriptor\n");
+ program->first_error_pc = start_pc;
+ return -EINVAL;
+ }
+
+ /* If needed, reserve space in resulting descriptor for derived key */
+ program->current_pc += (out_words - in_words);
+
+ return (int)start_pc;
+}
+
+#endif /* __RTA_PROTOCOL_CMD_H__ */
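A brief usage sketch of rta_proto_operation(), built only from identifiers that appear in this header (assuming an initialised struct program prg and the constants from hw/desc.h), for an IPsec ESP decapsulation descriptor:

    /* PROTOCOL OPERATION word for IPsec decap with AES-CBC + HMAC-SHA1-96;
     * __rta_ipsec_proto() rejects PROTINFO combinations the configured
     * SEC Era cannot handle.
     */
    int off = rta_proto_operation(&prg, OP_TYPE_DECAP_PROTOCOL,
                                  OP_PCLID_IPSEC,
                                  OP_PCL_IPSEC_AES_CBC |
                                  OP_PCL_IPSEC_HMAC_SHA1_96);
    if (off < 0)
            return off;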
diff --git a/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/sec_run_time_asm.h b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/sec_run_time_asm.h
new file mode 100644
index 00000000..c12edb0c
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/sec_run_time_asm.h
@@ -0,0 +1,823 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __RTA_SEC_RUN_TIME_ASM_H__
+#define __RTA_SEC_RUN_TIME_ASM_H__
+
+#include "hw/desc.h"
+
+/* hw/compat.h is not delivered in kernel */
+#ifndef __KERNEL__
+#include "hw/compat.h"
+#endif
+
+/**
+ * enum rta_sec_era - SEC HW block revisions supported by the RTA library
+ * @RTA_SEC_ERA_1: SEC Era 1
+ * @RTA_SEC_ERA_2: SEC Era 2
+ * @RTA_SEC_ERA_3: SEC Era 3
+ * @RTA_SEC_ERA_4: SEC Era 4
+ * @RTA_SEC_ERA_5: SEC Era 5
+ * @RTA_SEC_ERA_6: SEC Era 6
+ * @RTA_SEC_ERA_7: SEC Era 7
+ * @RTA_SEC_ERA_8: SEC Era 8
+ * @MAX_SEC_ERA: maximum SEC HW block revision supported by RTA library
+ */
+enum rta_sec_era {
+ RTA_SEC_ERA_1,
+ RTA_SEC_ERA_2,
+ RTA_SEC_ERA_3,
+ RTA_SEC_ERA_4,
+ RTA_SEC_ERA_5,
+ RTA_SEC_ERA_6,
+ RTA_SEC_ERA_7,
+ RTA_SEC_ERA_8,
+ MAX_SEC_ERA = RTA_SEC_ERA_8
+};
+
+/**
+ * DEFAULT_SEC_ERA - the default value for the SEC era in case the user provides
+ * an unsupported value.
+ */
+#define DEFAULT_SEC_ERA MAX_SEC_ERA
+
+/**
+ * USER_SEC_ERA - translates the SEC Era from internal to user representation.
+ * @sec_era: SEC Era in internal (library) representation
+ */
+#define USER_SEC_ERA(sec_era) (sec_era + 1)
+
+/**
+ * INTL_SEC_ERA - translates the SEC Era from user representation to internal.
+ * @sec_era: SEC Era in user representation
+ */
+#define INTL_SEC_ERA(sec_era) (sec_era - 1)
+
+/**
+ * enum rta_jump_type - Types of action taken by JUMP command
+ * @LOCAL_JUMP: conditional jump to an offset within the descriptor buffer
+ * @FAR_JUMP: conditional jump to a location outside the descriptor buffer,
+ * indicated by the POINTER field after the JUMP command.
+ * @HALT: conditional halt - stops the execution of the current descriptor and
+ * writes the PKHA / Math condition bits as the status / error code.
+ * @HALT_STATUS: conditional halt with user-specified status - stops the
+ * execution of the current descriptor and writes the value of the
+ * "LOCAL OFFSET" JUMP field as the status / error code.
+ * @GOSUB: conditional subroutine call - similar to @LOCAL_JUMP, but also saves
+ * return address in the Return Address register; subroutine calls
+ * cannot be nested.
+ * @RETURN: conditional subroutine return - similar to @LOCAL_JUMP, but the
+ * offset is taken from the Return Address register.
+ * @LOCAL_JUMP_INC: similar to @LOCAL_JUMP, but increment the register specified
+ * in "SRC_DST" JUMP field before evaluating the jump
+ * condition.
+ * @LOCAL_JUMP_DEC: similar to @LOCAL_JUMP, but decrement the register specified
+ * in "SRC_DST" JUMP field before evaluating the jump
+ * condition.
+ */
+enum rta_jump_type {
+ LOCAL_JUMP,
+ FAR_JUMP,
+ HALT,
+ HALT_STATUS,
+ GOSUB,
+ RETURN,
+ LOCAL_JUMP_INC,
+ LOCAL_JUMP_DEC
+};
+
+/**
+ * enum rta_jump_cond - How test conditions are evaluated by JUMP command
+ * @ALL_TRUE: perform action if ALL selected conditions are true
+ * @ALL_FALSE: perform action if ALL selected conditions are false
+ * @ANY_TRUE: perform action if ANY of the selected conditions is true
+ * @ANY_FALSE: perform action if ANY of the selected conditions is false
+ */
+enum rta_jump_cond {
+ ALL_TRUE,
+ ALL_FALSE,
+ ANY_TRUE,
+ ANY_FALSE
+};
+
+/**
+ * enum rta_share_type - Types of sharing for JOB_HDR and SHR_HDR commands
+ * @SHR_NEVER: nothing is shared; descriptors can execute in parallel (i.e. no
+ * dependencies are allowed between them).
+ * @SHR_WAIT: shared descriptor and keys are shared once the descriptor sets
+ * "OK to share" in DECO Control Register (DCTRL).
+ * @SHR_SERIAL: shared descriptor and keys are shared once the descriptor has
+ * completed.
+ * @SHR_ALWAYS: shared descriptor is shared anytime after the descriptor is
+ * loaded.
+ * @SHR_DEFER: valid only for JOB_HDR; sharing type is the one specified
+ * in the shared descriptor associated with the job descriptor.
+ */
+enum rta_share_type {
+ SHR_NEVER,
+ SHR_WAIT,
+ SHR_SERIAL,
+ SHR_ALWAYS,
+ SHR_DEFER
+};
+
+/**
+ * enum rta_data_type - Indicates how the data is provided and how to include
+ * it in the descriptor.
+ * @RTA_DATA_PTR: Data is in memory and accessed by reference; data address is a
+ * physical (bus) address.
+ * @RTA_DATA_IMM: Data is inlined in descriptor and accessed as immediate data;
+ * data address is a virtual address.
+ * @RTA_DATA_IMM_DMA: (AIOP only) Data is inlined in descriptor and accessed as
+ * immediate data; data address is a physical (bus) address
+ * in external memory and CDMA is programmed to transfer the
+ * data into descriptor buffer being built in Workspace Area.
+ */
+enum rta_data_type {
+ RTA_DATA_PTR = 1,
+ RTA_DATA_IMM,
+ RTA_DATA_IMM_DMA
+};
+
+/* Registers definitions */
+enum rta_regs {
+ /* CCB Registers */
+ CONTEXT1 = 1,
+ CONTEXT2,
+ KEY1,
+ KEY2,
+ KEY1SZ,
+ KEY2SZ,
+ ICV1SZ,
+ ICV2SZ,
+ DATA1SZ,
+ DATA2SZ,
+ ALTDS1,
+ IV1SZ,
+ AAD1SZ,
+ MODE1,
+ MODE2,
+ CCTRL,
+ DCTRL,
+ ICTRL,
+ CLRW,
+ CSTAT,
+ IFIFO,
+ NFIFO,
+ OFIFO,
+ PKASZ,
+ PKBSZ,
+ PKNSZ,
+ PKESZ,
+ /* DECO Registers */
+ MATH0,
+ MATH1,
+ MATH2,
+ MATH3,
+ DESCBUF,
+ JOBDESCBUF,
+ SHAREDESCBUF,
+ DPOVRD,
+ DJQDA,
+ DSTAT,
+ DPID,
+ DJQCTRL,
+ ALTSOURCE,
+ SEQINSZ,
+ SEQOUTSZ,
+ VSEQINSZ,
+ VSEQOUTSZ,
+ /* PKHA Registers */
+ PKA,
+ PKN,
+ PKA0,
+ PKA1,
+ PKA2,
+ PKA3,
+ PKB,
+ PKB0,
+ PKB1,
+ PKB2,
+ PKB3,
+ PKE,
+ /* Pseudo registers */
+ AB1,
+ AB2,
+ ABD,
+ IFIFOABD,
+ IFIFOAB1,
+ IFIFOAB2,
+ AFHA_SBOX,
+ MDHA_SPLIT_KEY,
+ JOBSRC,
+ ZERO,
+ ONE,
+ AAD1,
+ IV1,
+ IV2,
+ MSG1,
+ MSG2,
+ MSG,
+ MSG_CKSUM,
+ MSGOUTSNOOP,
+ MSGINSNOOP,
+ ICV1,
+ ICV2,
+ SKIP,
+ NONE,
+ RNGOFIFO,
+ RNG,
+ IDFNS,
+ ODFNS,
+ NFIFOSZ,
+ SZ,
+ PAD,
+ SAD1,
+ AAD2,
+ BIT_DATA,
+ NFIFO_SZL,
+ NFIFO_SZM,
+ NFIFO_L,
+ NFIFO_M,
+ SZL,
+ SZM,
+ JOBDESCBUF_EFF,
+ SHAREDESCBUF_EFF,
+ METADATA,
+ GTR,
+ STR,
+ OFIFO_SYNC,
+ MSGOUTSNOOP_ALT
+};
+
+/* Command flags */
+#define FLUSH1 BIT(0)
+#define LAST1 BIT(1)
+#define LAST2 BIT(2)
+#define IMMED BIT(3)
+#define SGF BIT(4)
+#define VLF BIT(5)
+#define EXT BIT(6)
+#define CONT BIT(7)
+#define SEQ BIT(8)
+#define AIDF BIT(9)
+#define FLUSH2 BIT(10)
+#define CLASS1 BIT(11)
+#define CLASS2 BIT(12)
+#define BOTH BIT(13)
+
+/**
+ * DCOPY - (AIOP only) command param is a pointer to external memory
+ *
+ * CDMA must be used to transfer the key via DMA into Workspace Area.
+ * Valid only in combination with IMMED flag.
+ */
+#define DCOPY BIT(30)
+
+#define COPY BIT(31) /* command param is a pointer (not immediate);
+ * valid only in combination with IMMED
+ */
+
+#define __COPY_MASK (COPY | DCOPY)
+
+/* SEQ IN/OUT PTR Command specific flags */
+#define RBS BIT(16)
+#define INL BIT(17)
+#define PRE BIT(18)
+#define RTO BIT(19)
+#define RJD BIT(20)
+#define SOP BIT(21)
+#define RST BIT(22)
+#define EWS BIT(23)
+
+#define ENC BIT(14) /* Encrypted Key */
+#define EKT BIT(15) /* AES CCM Encryption (default is
+ * AES ECB Encryption)
+ */
+#define TK BIT(16) /* Trusted Descriptor Key (default is
+ * Job Descriptor Key)
+ */
+#define NWB BIT(17) /* No Write Back Key */
+#define PTS BIT(18) /* Plaintext Store */
+
+/* HEADER Command specific flags */
+#define RIF BIT(16)
+#define DNR BIT(17)
+#define CIF BIT(18)
+#define PD BIT(19)
+#define RSMS BIT(20)
+#define TD BIT(21)
+#define MTD BIT(22)
+#define REO BIT(23)
+#define SHR BIT(24)
+#define SC BIT(25)
+/* Extended HEADER specific flags */
+#define DSV BIT(7)
+#define DSEL_MASK 0x00000007 /* DECO Select */
+#define FTD BIT(8)
+
+/* JUMP Command specific flags */
+#define NIFP BIT(20)
+#define NIP BIT(21)
+#define NOP BIT(22)
+#define NCP BIT(23)
+#define CALM BIT(24)
+
+#define MATH_Z BIT(25)
+#define MATH_N BIT(26)
+#define MATH_NV BIT(27)
+#define MATH_C BIT(28)
+#define PK_0 BIT(29)
+#define PK_GCD_1 BIT(30)
+#define PK_PRIME BIT(31)
+#define SELF BIT(0)
+#define SHRD BIT(1)
+#define JQP BIT(2)
+
+/* NFIFOADD specific flags */
+#define PAD_ZERO BIT(16)
+#define PAD_NONZERO BIT(17)
+#define PAD_INCREMENT BIT(18)
+#define PAD_RANDOM BIT(19)
+#define PAD_ZERO_N1 BIT(20)
+#define PAD_NONZERO_0 BIT(21)
+#define PAD_N1 BIT(23)
+#define PAD_NONZERO_N BIT(24)
+#define OC BIT(25)
+#define BM BIT(26)
+#define PR BIT(27)
+#define PS BIT(28)
+#define BP BIT(29)
+
+/* MOVE Command specific flags */
+#define WAITCOMP BIT(16)
+#define SIZE_WORD BIT(17)
+#define SIZE_BYTE BIT(18)
+#define SIZE_DWORD BIT(19)
+
+/* MATH command specific flags */
+#define IFB MATH_IFB
+#define NFU MATH_NFU
+#define STL MATH_STL
+#define SSEL MATH_SSEL
+#define SWP MATH_SWP
+#define IMMED2 BIT(31)
+
+/**
+ * struct program - descriptor buffer management structure
+ * @current_pc: current offset in descriptor
+ * @current_instruction: current instruction in descriptor
+ * @first_error_pc: offset of the first error in descriptor
+ * @start_pc: start offset in descriptor buffer
+ * @buffer: buffer carrying descriptor
+ * @shrhdr: shared descriptor header
+ * @jobhdr: job descriptor header
+ * @ps: pointer field size; if ps is true, pointers will be 36 bits in
+ * length; if ps is false, pointers will be 32 bits in length
+ * @bswap: if true, perform byte swap on a 4-byte boundary
+ */
+struct program {
+ unsigned int current_pc;
+ unsigned int current_instruction;
+ unsigned int first_error_pc;
+ unsigned int start_pc;
+ uint32_t *buffer;
+ uint32_t *shrhdr;
+ uint32_t *jobhdr;
+ bool ps;
+ bool bswap;
+};
+
+static inline void
+rta_program_cntxt_init(struct program *program,
+ uint32_t *buffer, unsigned int offset)
+{
+ program->current_pc = 0;
+ program->current_instruction = 0;
+ program->first_error_pc = 0;
+ program->start_pc = offset;
+ program->buffer = buffer;
+ program->shrhdr = NULL;
+ program->jobhdr = NULL;
+ program->ps = false;
+ program->bswap = false;
+}
+
+static inline int
+rta_program_finalize(struct program *program)
+{
+ /* Descriptors are usually not allowed to exceed 64 words in size */
+ if (program->current_pc > MAX_CAAM_DESCSIZE)
+ pr_warn("Descriptor Size exceeded max limit of 64 words\n");
+
+ /* Descriptor is erroneous */
+ if (program->first_error_pc) {
+ pr_err("Descriptor creation error\n");
+ return -EINVAL;
+ }
+
+ /* Update descriptor length in shared and job descriptor headers */
+ if (program->shrhdr != NULL)
+ *program->shrhdr |= program->bswap ?
+ swab32(program->current_pc) :
+ program->current_pc;
+ else if (program->jobhdr != NULL)
+ *program->jobhdr |= program->bswap ?
+ swab32(program->current_pc) :
+ program->current_pc;
+
+ return (int)program->current_pc;
+}
+
+static inline unsigned int
+rta_program_set_36bit_addr(struct program *program)
+{
+ program->ps = true;
+ return program->current_pc;
+}
+
+static inline unsigned int
+rta_program_set_bswap(struct program *program)
+{
+ program->bswap = true;
+ return program->current_pc;
+}
+
+static inline void
+__rta_out32(struct program *program, uint32_t val)
+{
+ program->buffer[program->current_pc] = program->bswap ?
+ swab32(val) : val;
+ program->current_pc++;
+}
+
+static inline void
+__rta_out_be32(struct program *program, uint32_t val)
+{
+ program->buffer[program->current_pc] = cpu_to_be32(val);
+ program->current_pc++;
+}
+
+static inline void
+__rta_out_le32(struct program *program, uint32_t val)
+{
+ program->buffer[program->current_pc] = cpu_to_le32(val);
+ program->current_pc++;
+}
+
+static inline void
+__rta_out64(struct program *program, bool is_ext, uint64_t val)
+{
+ if (is_ext) {
+ /*
+ * Since we are guaranteed only a 4-byte alignment in the
+ * descriptor buffer, we have to do 2 x 32-bit (word) writes.
+ * For the order of the 2 words to be correct, we need to
+ * take into account the endianness of the CPU.
+ */
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ __rta_out32(program, program->bswap ? lower_32_bits(val) :
+ upper_32_bits(val));
+
+ __rta_out32(program, program->bswap ? upper_32_bits(val) :
+ lower_32_bits(val));
+#else
+ __rta_out32(program, program->bswap ? upper_32_bits(val) :
+ lower_32_bits(val));
+
+ __rta_out32(program, program->bswap ? lower_32_bits(val) :
+ upper_32_bits(val));
+#endif
+ } else {
+ __rta_out32(program, lower_32_bits(val));
+ }
+}
+
+static inline unsigned int
+rta_word(struct program *program, uint32_t val)
+{
+ unsigned int start_pc = program->current_pc;
+
+ __rta_out32(program, val);
+
+ return start_pc;
+}
+
+static inline unsigned int
+rta_dword(struct program *program, uint64_t val)
+{
+ unsigned int start_pc = program->current_pc;
+
+ __rta_out64(program, true, val);
+
+ return start_pc;
+}
+
+static inline uint32_t
+inline_flags(enum rta_data_type data_type)
+{
+ switch (data_type) {
+ case RTA_DATA_PTR:
+ return 0;
+ case RTA_DATA_IMM:
+ return IMMED | COPY;
+ case RTA_DATA_IMM_DMA:
+ return IMMED | DCOPY;
+ default:
+ /* warn and default to RTA_DATA_PTR */
+ pr_warn("RTA: defaulting to RTA_DATA_PTR parameter type\n");
+ return 0;
+ }
+}
+
+static inline unsigned int
+rta_copy_data(struct program *program, uint8_t *data, unsigned int length)
+{
+ unsigned int i;
+ unsigned int start_pc = program->current_pc;
+ uint8_t *tmp = (uint8_t *)&program->buffer[program->current_pc];
+
+ for (i = 0; i < length; i++)
+ *tmp++ = data[i];
+ program->current_pc += (length + 3) / 4;
+
+ return start_pc;
+}
+
+#if defined(__EWL__) && defined(AIOP)
+static inline void
+__rta_dma_data(void *ws_dst, uint64_t ext_address, uint16_t size)
+{ cdma_read(ws_dst, ext_address, size); }
+#else
+static inline void
+__rta_dma_data(void *ws_dst __maybe_unused,
+ uint64_t ext_address __maybe_unused,
+ uint16_t size __maybe_unused)
+{ pr_warn("RTA: DCOPY not supported, DMA will be skipped\n"); }
+#endif /* defined(__EWL__) && defined(AIOP) */
+
+static inline void
+__rta_inline_data(struct program *program, uint64_t data,
+ uint32_t copy_data, uint32_t length)
+{
+ if (!copy_data) {
+ __rta_out64(program, length > 4, data);
+ } else if (copy_data & COPY) {
+ uint8_t *tmp = (uint8_t *)&program->buffer[program->current_pc];
+ uint32_t i;
+
+ for (i = 0; i < length; i++)
+ *tmp++ = ((uint8_t *)(uintptr_t)data)[i];
+ program->current_pc += ((length + 3) / 4);
+ } else if (copy_data & DCOPY) {
+ __rta_dma_data(&program->buffer[program->current_pc], data,
+ (uint16_t)length);
+ program->current_pc += ((length + 3) / 4);
+ }
+}
+
+static inline unsigned int
+rta_desc_len(uint32_t *buffer)
+{
+ if ((*buffer & CMD_MASK) == CMD_DESC_HDR)
+ return *buffer & HDR_DESCLEN_MASK;
+ else
+ return *buffer & HDR_DESCLEN_SHR_MASK;
+}
+
+static inline unsigned int
+rta_desc_bytes(uint32_t *buffer)
+{
+ return (unsigned int)(rta_desc_len(buffer) * CAAM_CMD_SZ);
+}
+
+/**
+ * split_key_len - Compute MDHA split key length for a given algorithm
+ * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* or
+ * OP_PCLID_DKP_* - MD5, SHA1, SHA224, SHA256, SHA384, SHA512.
+ *
+ * Return: MDHA split key length
+ */
+static inline uint32_t
+split_key_len(uint32_t hash)
+{
+ /* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
+ static const uint8_t mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
+ uint32_t idx;
+
+ idx = (hash & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
+
+ return (uint32_t)(mdpadlen[idx] * 2);
+}
+
+/**
+ * split_key_pad_len - Compute MDHA split key pad length for a given algorithm
+ * @hash: Hashing algorithm selection, one of OP_ALG_ALGSEL_* - MD5, SHA1,
+ * SHA224, SHA384, SHA512.
+ *
+ * Return: MDHA split key pad length
+ */
+static inline uint32_t
+split_key_pad_len(uint32_t hash)
+{
+ return ALIGN(split_key_len(hash), 16);
+}
+
+static inline unsigned int
+rta_set_label(struct program *program)
+{
+ return program->current_pc + program->start_pc;
+}
+
+static inline int
+rta_patch_move(struct program *program, int line, unsigned int new_ref)
+{
+ uint32_t opcode;
+ bool bswap = program->bswap;
+
+ if (line < 0)
+ return -EINVAL;
+
+ opcode = bswap ? swab32(program->buffer[line]) : program->buffer[line];
+
+ opcode &= (uint32_t)~MOVE_OFFSET_MASK;
+ opcode |= (new_ref << (MOVE_OFFSET_SHIFT + 2)) & MOVE_OFFSET_MASK;
+ program->buffer[line] = bswap ? swab32(opcode) : opcode;
+
+ return 0;
+}
+
+static inline int
+rta_patch_jmp(struct program *program, int line, unsigned int new_ref)
+{
+ uint32_t opcode;
+ bool bswap = program->bswap;
+
+ if (line < 0)
+ return -EINVAL;
+
+ opcode = bswap ? swab32(program->buffer[line]) : program->buffer[line];
+
+ opcode &= (uint32_t)~JUMP_OFFSET_MASK;
+ opcode |= (new_ref - (line + program->start_pc)) & JUMP_OFFSET_MASK;
+ program->buffer[line] = bswap ? swab32(opcode) : opcode;
+
+ return 0;
+}
+
+static inline int
+rta_patch_header(struct program *program, int line, unsigned int new_ref)
+{
+ uint32_t opcode;
+ bool bswap = program->bswap;
+
+ if (line < 0)
+ return -EINVAL;
+
+ opcode = bswap ? swab32(program->buffer[line]) : program->buffer[line];
+
+ opcode &= (uint32_t)~HDR_START_IDX_MASK;
+ opcode |= (new_ref << HDR_START_IDX_SHIFT) & HDR_START_IDX_MASK;
+ program->buffer[line] = bswap ? swab32(opcode) : opcode;
+
+ return 0;
+}
+
+static inline int
+rta_patch_load(struct program *program, int line, unsigned int new_ref)
+{
+ uint32_t opcode;
+ bool bswap = program->bswap;
+
+ if (line < 0)
+ return -EINVAL;
+
+ opcode = (bswap ? swab32(program->buffer[line]) :
+ program->buffer[line]) & (uint32_t)~LDST_OFFSET_MASK;
+
+ if (opcode & (LDST_SRCDST_WORD_DESCBUF | LDST_CLASS_DECO))
+ opcode |= (new_ref << LDST_OFFSET_SHIFT) & LDST_OFFSET_MASK;
+ else
+ opcode |= (new_ref << (LDST_OFFSET_SHIFT + 2)) &
+ LDST_OFFSET_MASK;
+
+ program->buffer[line] = bswap ? swab32(opcode) : opcode;
+
+ return 0;
+}
+
+static inline int
+rta_patch_store(struct program *program, int line, unsigned int new_ref)
+{
+ uint32_t opcode;
+ bool bswap = program->bswap;
+
+ if (line < 0)
+ return -EINVAL;
+
+ opcode = bswap ? swab32(program->buffer[line]) : program->buffer[line];
+
+ opcode &= (uint32_t)~LDST_OFFSET_MASK;
+
+ switch (opcode & LDST_SRCDST_MASK) {
+ case LDST_SRCDST_WORD_DESCBUF:
+ case LDST_SRCDST_WORD_DESCBUF_JOB:
+ case LDST_SRCDST_WORD_DESCBUF_SHARED:
+ case LDST_SRCDST_WORD_DESCBUF_JOB_WE:
+ case LDST_SRCDST_WORD_DESCBUF_SHARED_WE:
+ opcode |= ((new_ref) << LDST_OFFSET_SHIFT) & LDST_OFFSET_MASK;
+ break;
+ default:
+ opcode |= (new_ref << (LDST_OFFSET_SHIFT + 2)) &
+ LDST_OFFSET_MASK;
+ }
+
+ program->buffer[line] = bswap ? swab32(opcode) : opcode;
+
+ return 0;
+}
+
+static inline int
+rta_patch_raw(struct program *program, int line, unsigned int mask,
+ unsigned int new_val)
+{
+ uint32_t opcode;
+ bool bswap = program->bswap;
+
+ if (line < 0)
+ return -EINVAL;
+
+ opcode = bswap ? swab32(program->buffer[line]) : program->buffer[line];
+
+ opcode &= (uint32_t)~mask;
+ opcode |= new_val & mask;
+ program->buffer[line] = bswap ? swab32(opcode) : opcode;
+
+ return 0;
+}
+
+static inline int
+__rta_map_opcode(uint32_t name, const uint32_t (*map_table)[2],
+ unsigned int num_of_entries, uint32_t *val)
+{
+ unsigned int i;
+
+ for (i = 0; i < num_of_entries; i++)
+ if (map_table[i][0] == name) {
+ *val = map_table[i][1];
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline void
+__rta_map_flags(uint32_t flags, const uint32_t (*flags_table)[2],
+ unsigned int num_of_entries, uint32_t *opcode)
+{
+ unsigned int i;
+
+ for (i = 0; i < num_of_entries; i++) {
+ if (flags_table[i][0] & flags)
+ *opcode |= flags_table[i][1];
+ }
+}
+
+#endif /* __RTA_SEC_RUN_TIME_ASM_H__ */
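To show how the pieces of this header fit together, a minimal, self-contained sketch of the program lifecycle (buffer contents and values are placeholders):

    /* Build a trivial descriptor: init the context over a word buffer,
     * emit raw words, then finalise to get the length in words (or a
     * negative value if an earlier command recorded an error).
     */
    uint32_t desc[MAX_CAAM_DESCSIZE];
    struct program prg;
    int desc_len;

    rta_program_cntxt_init(&prg, desc, 0);
    /* optional: rta_program_set_36bit_addr(&prg) for 36-bit pointers,
     * rta_program_set_bswap(&prg) when the CAAM endianness differs.
     */
    rta_word(&prg, 0xdeadbeef);              /* one 32-bit word */
    rta_dword(&prg, 0x0123456789abcdefULL);  /* one 64-bit value, 2 words */
    desc_len = rta_program_finalize(&prg);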
diff --git a/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/seq_in_out_ptr_cmd.h b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/seq_in_out_ptr_cmd.h
new file mode 100644
index 00000000..8d421a5d
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/seq_in_out_ptr_cmd.h
@@ -0,0 +1,208 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __RTA_SEQ_IN_OUT_PTR_CMD_H__
+#define __RTA_SEQ_IN_OUT_PTR_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+/* Allowed SEQ IN PTR flags for each SEC Era. */
+static const uint32_t seq_in_ptr_flags[] = {
+ RBS | INL | SGF | PRE | EXT | RTO,
+ RBS | INL | SGF | PRE | EXT | RTO | RJD,
+ RBS | INL | SGF | PRE | EXT | RTO | RJD,
+ RBS | INL | SGF | PRE | EXT | RTO | RJD,
+ RBS | INL | SGF | PRE | EXT | RTO | RJD | SOP,
+ RBS | INL | SGF | PRE | EXT | RTO | RJD | SOP,
+ RBS | INL | SGF | PRE | EXT | RTO | RJD | SOP,
+ RBS | INL | SGF | PRE | EXT | RTO | RJD | SOP
+};
+
+/* Allowed SEQ OUT PTR flags for each SEC Era. */
+static const uint32_t seq_out_ptr_flags[] = {
+ SGF | PRE | EXT,
+ SGF | PRE | EXT | RTO,
+ SGF | PRE | EXT | RTO,
+ SGF | PRE | EXT | RTO,
+ SGF | PRE | EXT | RTO | RST | EWS,
+ SGF | PRE | EXT | RTO | RST | EWS,
+ SGF | PRE | EXT | RTO | RST | EWS,
+ SGF | PRE | EXT | RTO | RST | EWS
+};
+
+static inline int
+rta_seq_in_ptr(struct program *program, uint64_t src,
+ uint32_t length, uint32_t flags)
+{
+ uint32_t opcode = CMD_SEQ_IN_PTR;
+ unsigned int start_pc = program->current_pc;
+ int ret = -EINVAL;
+
+ /* Parameters checking */
+ if ((flags & RTO) && (flags & PRE)) {
+ pr_err("SEQ IN PTR: Invalid usage of RTO and PRE flags\n");
+ goto err;
+ }
+ if (flags & ~seq_in_ptr_flags[rta_sec_era]) {
+ pr_err("SEQ IN PTR: Flag(s) not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+ if ((flags & INL) && (flags & RJD)) {
+ pr_err("SEQ IN PTR: Invalid usage of INL and RJD flags\n");
+ goto err;
+ }
+ if ((src) && (flags & (SOP | RTO | PRE))) {
+ pr_err("SEQ IN PTR: Invalid usage of RTO or PRE flag\n");
+ goto err;
+ }
+ if ((flags & SOP) && (flags & (RBS | PRE | RTO | EXT))) {
+ pr_err("SEQ IN PTR: Invalid usage of SOP and (RBS or PRE or RTO or EXT) flags\n");
+ goto err;
+ }
+
+ /* write flag fields */
+ if (flags & RBS)
+ opcode |= SQIN_RBS;
+ if (flags & INL)
+ opcode |= SQIN_INL;
+ if (flags & SGF)
+ opcode |= SQIN_SGF;
+ if (flags & PRE)
+ opcode |= SQIN_PRE;
+ if (flags & RTO)
+ opcode |= SQIN_RTO;
+ if (flags & RJD)
+ opcode |= SQIN_RJD;
+ if (flags & SOP)
+ opcode |= SQIN_SOP;
+ if ((length >> 16) || (flags & EXT)) {
+ if (flags & SOP) {
+ pr_err("SEQ IN PTR: Invalid usage of SOP and EXT flags\n");
+ goto err;
+ }
+
+ opcode |= SQIN_EXT;
+ } else {
+ opcode |= length & SQIN_LEN_MASK;
+ }
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ /* write pointer or immediate data field */
+ if (!(opcode & (SQIN_PRE | SQIN_RTO | SQIN_SOP)))
+ __rta_out64(program, program->ps, src);
+
+ /* write extended length field */
+ if (opcode & SQIN_EXT)
+ __rta_out32(program, length);
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
+
+static inline int
+rta_seq_out_ptr(struct program *program, uint64_t dst,
+ uint32_t length, uint32_t flags)
+{
+ uint32_t opcode = CMD_SEQ_OUT_PTR;
+ unsigned int start_pc = program->current_pc;
+ int ret = -EINVAL;
+
+ /* Parameters checking */
+ if (flags & ~seq_out_ptr_flags[rta_sec_era]) {
+ pr_err("SEQ OUT PTR: Flag(s) not supported by SEC Era %d\n",
+ USER_SEC_ERA(rta_sec_era));
+ goto err;
+ }
+ if ((flags & RTO) && (flags & PRE)) {
+ pr_err("SEQ OUT PTR: Invalid usage of RTO and PRE flags\n");
+ goto err;
+ }
+ if ((dst) && (flags & (RTO | PRE))) {
+ pr_err("SEQ OUT PTR: Invalid usage of RTO or PRE flag\n");
+ goto err;
+ }
+ if ((flags & RST) && !(flags & RTO)) {
+ pr_err("SEQ OUT PTR: RST flag must be used with RTO flag\n");
+ goto err;
+ }
+
+ /* write flag fields */
+ if (flags & SGF)
+ opcode |= SQOUT_SGF;
+ if (flags & PRE)
+ opcode |= SQOUT_PRE;
+ if (flags & RTO)
+ opcode |= SQOUT_RTO;
+ if (flags & RST)
+ opcode |= SQOUT_RST;
+ if (flags & EWS)
+ opcode |= SQOUT_EWS;
+ if ((length >> 16) || (flags & EXT))
+ opcode |= SQOUT_EXT;
+ else
+ opcode |= length & SQOUT_LEN_MASK;
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ /* write pointer or immediate data field */
+ if (!(opcode & (SQOUT_PRE | SQOUT_RTO)))
+ __rta_out64(program, program->ps, dst);
+
+ /* write extended length field */
+ if (opcode & SQOUT_EXT)
+ __rta_out32(program, length);
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
+
+#endif /* __RTA_SEQ_IN_OUT_PTR_CMD_H__ */
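A minimal usage sketch (not part of the original sources) of the two commands above, assuming a struct program `p` already initialized by the RTA framework; `example_seq_ptrs`, `src_iova` and `dst_iova` are hypothetical names used only for illustration.

/* Hedged sketch: point the SEC sequence input/output at plain buffers. */
static inline void
example_seq_ptrs(struct program *p, uint64_t src_iova, uint64_t dst_iova,
		 uint32_t len)
{
	/* Sequence input: read 'len' bytes from src_iova, no special flags. */
	rta_seq_in_ptr(p, src_iova, len, 0);

	/* Sequence output: write up to 'len' bytes to dst_iova; the EXT flag
	 * is only needed when the length does not fit the 16-bit field.
	 */
	rta_seq_out_ptr(p, dst_iova, len, 0);
}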
diff --git a/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/signature_cmd.h b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/signature_cmd.h
new file mode 100644
index 00000000..ac4f3ff8
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/signature_cmd.h
@@ -0,0 +1,75 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __RTA_SIGNATURE_CMD_H__
+#define __RTA_SIGNATURE_CMD_H__
+
+static inline int
+rta_signature(struct program *program, uint32_t sign_type)
+{
+ uint32_t opcode = CMD_SIGNATURE;
+ unsigned int start_pc = program->current_pc;
+
+ switch (sign_type) {
+ case (SIGN_TYPE_FINAL):
+ case (SIGN_TYPE_FINAL_RESTORE):
+ case (SIGN_TYPE_FINAL_NONZERO):
+ case (SIGN_TYPE_IMM_2):
+ case (SIGN_TYPE_IMM_3):
+ case (SIGN_TYPE_IMM_4):
+ opcode |= sign_type;
+ break;
+ default:
+ pr_err("SIGNATURE Command: Invalid type selection\n");
+ goto err;
+ }
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return -EINVAL;
+}
+
+#endif /* __RTA_SIGNATURE_CMD_H__ */
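A brief hedged example of the command above; `p` is assumed to be an initialized struct program and `example_signature` is a hypothetical helper. The return value is the command's position in the descriptor, or a negative errno for an invalid type.

/* Hedged sketch: append a final-type SIGNATURE command to a descriptor. */
static inline int
example_signature(struct program *p)
{
	int pos = rta_signature(p, SIGN_TYPE_FINAL);

	if (pos < 0)
		return pos;	/* invalid sign_type rejected before emitting */
	return pos;		/* position of the command in the descriptor */
}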
diff --git a/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/store_cmd.h b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/store_cmd.h
new file mode 100644
index 00000000..8fd01801
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/hw/rta/store_cmd.h
@@ -0,0 +1,185 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2008-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __RTA_STORE_CMD_H__
+#define __RTA_STORE_CMD_H__
+
+extern enum rta_sec_era rta_sec_era;
+
+static const uint32_t store_src_table[][2] = {
+/*1*/ { KEY1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_KEYSZ_REG },
+ { KEY2SZ, LDST_CLASS_2_CCB | LDST_SRCDST_WORD_KEYSZ_REG },
+ { DJQDA, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_JQDAR },
+ { MODE1, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_MODE_REG },
+ { MODE2, LDST_CLASS_2_CCB | LDST_SRCDST_WORD_MODE_REG },
+ { DJQCTRL, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_JQCTRL },
+ { DATA1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_DATASZ_REG },
+ { DATA2SZ, LDST_CLASS_2_CCB | LDST_SRCDST_WORD_DATASZ_REG },
+ { DSTAT, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_STAT },
+ { ICV1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_ICVSZ_REG },
+ { ICV2SZ, LDST_CLASS_2_CCB | LDST_SRCDST_WORD_ICVSZ_REG },
+ { DPID, LDST_CLASS_DECO | LDST_SRCDST_WORD_PID },
+ { CCTRL, LDST_SRCDST_WORD_CHACTRL },
+ { ICTRL, LDST_SRCDST_WORD_IRQCTRL },
+ { CLRW, LDST_SRCDST_WORD_CLRW },
+ { MATH0, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_MATH0 },
+ { CSTAT, LDST_SRCDST_WORD_STAT },
+ { MATH1, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_MATH1 },
+ { MATH2, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_MATH2 },
+ { AAD1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_DECO_AAD_SZ },
+ { MATH3, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_MATH3 },
+ { IV1SZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_CLASS1_IV_SZ },
+ { PKASZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_PKHA_A_SZ },
+ { PKBSZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_PKHA_B_SZ },
+ { PKESZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_PKHA_E_SZ },
+ { PKNSZ, LDST_CLASS_1_CCB | LDST_SRCDST_WORD_PKHA_N_SZ },
+ { CONTEXT1, LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT },
+ { CONTEXT2, LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_CONTEXT },
+ { DESCBUF, LDST_CLASS_DECO | LDST_SRCDST_WORD_DESCBUF },
+/*30*/ { JOBDESCBUF, LDST_CLASS_DECO | LDST_SRCDST_WORD_DESCBUF_JOB },
+ { SHAREDESCBUF, LDST_CLASS_DECO | LDST_SRCDST_WORD_DESCBUF_SHARED },
+/*32*/ { JOBDESCBUF_EFF, LDST_CLASS_DECO |
+ LDST_SRCDST_WORD_DESCBUF_JOB_WE },
+ { SHAREDESCBUF_EFF, LDST_CLASS_DECO |
+ LDST_SRCDST_WORD_DESCBUF_SHARED_WE },
+/*34*/ { GTR, LDST_CLASS_DECO | LDST_SRCDST_WORD_GTR },
+ { STR, LDST_CLASS_DECO | LDST_SRCDST_WORD_STR }
+};
+
+/*
+ * Allowed STORE sources for each SEC Era.
+ * Values represent the number of entries from store_src_table[] that are
+ * supported.
+ */
+static const unsigned int store_src_table_sz[] = {29, 31, 33, 33,
+ 33, 33, 35, 35};
+
+static inline int
+rta_store(struct program *program, uint64_t src,
+ uint16_t offset, uint64_t dst, uint32_t length,
+ uint32_t flags)
+{
+ uint32_t opcode = 0, val;
+ int ret = -EINVAL;
+ unsigned int start_pc = program->current_pc;
+
+ if (flags & SEQ)
+ opcode = CMD_SEQ_STORE;
+ else
+ opcode = CMD_STORE;
+
+ /* parameters check */
+ if ((flags & IMMED) && (flags & SGF)) {
+ pr_err("STORE: Invalid flag. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+ if ((flags & IMMED) && (offset != 0)) {
+ pr_err("STORE: Invalid flag. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+
+ if ((flags & SEQ) && ((src == JOBDESCBUF) || (src == SHAREDESCBUF) ||
+ (src == JOBDESCBUF_EFF) ||
+ (src == SHAREDESCBUF_EFF))) {
+ pr_err("STORE: Invalid SRC type. SEC PC: %d; Instr: %d\n",
+ program->current_pc, program->current_instruction);
+ goto err;
+ }
+
+ if (flags & IMMED)
+ opcode |= LDST_IMM;
+
+ if ((flags & SGF) || (flags & VLF))
+ opcode |= LDST_VLF;
+
+ /*
+ * The source of the data to be stored can be specified as:
+ * - a register location, set in the src field [9-15];
+ * - if the IMMED flag is set, data set in the value field [0-31];
+ * the user can pass either the actual value or a pointer to the data
+ */
+ if (!(flags & IMMED)) {
+ ret = __rta_map_opcode((uint32_t)src, store_src_table,
+ store_src_table_sz[rta_sec_era], &val);
+ if (ret < 0) {
+ pr_err("STORE: Invalid source. SEC PC: %d; Instr: %d\n",
+ program->current_pc,
+ program->current_instruction);
+ goto err;
+ }
+ opcode |= val;
+ }
+
+ /* DESC BUFFER: length / offset values are specified in 4-byte words */
+ if ((src == DESCBUF) || (src == JOBDESCBUF) || (src == SHAREDESCBUF) ||
+ (src == JOBDESCBUF_EFF) || (src == SHAREDESCBUF_EFF)) {
+ opcode |= (length >> 2);
+ opcode |= (uint32_t)((offset >> 2) << LDST_OFFSET_SHIFT);
+ } else {
+ opcode |= length;
+ opcode |= (uint32_t)(offset << LDST_OFFSET_SHIFT);
+ }
+
+ __rta_out32(program, opcode);
+ program->current_instruction++;
+
+ if ((src == JOBDESCBUF) || (src == SHAREDESCBUF) ||
+ (src == JOBDESCBUF_EFF) || (src == SHAREDESCBUF_EFF))
+ return (int)start_pc;
+
+ /* for plain STORE (not SEQ STORE), write the destination pointer */
+ if (!(flags & SEQ))
+ __rta_out64(program, program->ps, dst);
+
+ /* for IMMED data, place the data here */
+ if (flags & IMMED)
+ __rta_inline_data(program, src, flags & __COPY_MASK, length);
+
+ return (int)start_pc;
+
+ err:
+ program->first_error_pc = start_pc;
+ program->current_instruction++;
+ return ret;
+}
+
+#endif /* __RTA_STORE_CMD_H__ */
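A hedged sketch of rta_store() from above; `p` and `ctx_iova` are assumed/hypothetical, and CONTEXT1 is one of the register sources listed in store_src_table[].

/* Hedged sketch: store 16 bytes of the Class 1 context register to memory. */
static inline int
example_store_context(struct program *p, uint64_t ctx_iova)
{
	/* src = CONTEXT1 register, offset 0, 16 bytes, no SEQ/IMMED/SGF flags:
	 * a destination pointer is therefore written after the opcode.
	 */
	return rta_store(p, CONTEXT1, 0, ctx_iova, 16, 0);
}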
diff --git a/src/seastar/dpdk/drivers/crypto/dpaa2_sec/mc/dpseci.c b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/mc/dpseci.c
new file mode 100644
index 00000000..6b3411f0
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/mc/dpseci.c
@@ -0,0 +1,558 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <fsl_mc_sys.h>
+#include <fsl_mc_cmd.h>
+#include <fsl_dpseci.h>
+#include <fsl_dpseci_cmd.h>
+
+int
+dpseci_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpseci_id,
+ uint16_t *token)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ DPSECI_CMD_OPEN(cmd, dpseci_id);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
+
+ return 0;
+}
+
+int
+dpseci_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLOSE,
+ cmd_flags,
+ token);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+int
+dpseci_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpseci_cfg *cfg,
+ uint32_t *obj_id)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CREATE,
+ cmd_flags,
+ dprc_token);
+ DPSECI_CMD_CREATE(cmd, cfg);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ CMD_CREATE_RSP_GET_OBJ_ID_PARAM0(cmd, *obj_id);
+
+ return 0;
+}
+
+int
+dpseci_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DESTROY,
+ cmd_flags,
+ dprc_token);
+ /* set object id to destroy */
+ CMD_DESTROY_SET_OBJ_ID_PARAM0(cmd, object_id);
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+int
+dpseci_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_ENABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+int
+dpseci_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DISABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+int
+dpseci_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_IS_ENABLED,
+ cmd_flags,
+ token);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPSECI_RSP_IS_ENABLED(cmd, *en);
+
+ return 0;
+}
+
+int
+dpseci_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_RESET,
+ cmd_flags,
+ token);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+int
+dpseci_get_irq(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ int *type,
+ struct dpseci_irq_cfg *irq_cfg)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ,
+ cmd_flags,
+ token);
+ DPSECI_CMD_GET_IRQ(cmd, irq_index);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPSECI_RSP_GET_IRQ(cmd, *type, irq_cfg);
+
+ return 0;
+}
+
+int
+dpseci_set_irq(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ struct dpseci_irq_cfg *irq_cfg)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ,
+ cmd_flags,
+ token);
+ DPSECI_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+int
+dpseci_get_irq_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ uint8_t *en)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_ENABLE,
+ cmd_flags,
+ token);
+ DPSECI_CMD_GET_IRQ_ENABLE(cmd, irq_index);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPSECI_RSP_GET_IRQ_ENABLE(cmd, *en);
+
+ return 0;
+}
+
+int
+dpseci_set_irq_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ uint8_t en)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_ENABLE,
+ cmd_flags,
+ token);
+ DPSECI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+int
+dpseci_get_irq_mask(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ uint32_t *mask)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_MASK,
+ cmd_flags,
+ token);
+ DPSECI_CMD_GET_IRQ_MASK(cmd, irq_index);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPSECI_RSP_GET_IRQ_MASK(cmd, *mask);
+
+ return 0;
+}
+
+int
+dpseci_set_irq_mask(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ uint32_t mask)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_MASK,
+ cmd_flags,
+ token);
+ DPSECI_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+int
+dpseci_get_irq_status(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ uint32_t *status)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_STATUS,
+ cmd_flags,
+ token);
+ DPSECI_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPSECI_RSP_GET_IRQ_STATUS(cmd, *status);
+
+ return 0;
+}
+
+int
+dpseci_clear_irq_status(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ uint32_t status)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLEAR_IRQ_STATUS,
+ cmd_flags,
+ token);
+ DPSECI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+int
+dpseci_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpseci_attr *attr)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPSECI_RSP_GET_ATTR(cmd, attr);
+
+ return 0;
+}
+
+int
+dpseci_set_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t queue,
+ const struct dpseci_rx_queue_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_RX_QUEUE,
+ cmd_flags,
+ token);
+ DPSECI_CMD_SET_RX_QUEUE(cmd, queue, cfg);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+
+int
+dpseci_get_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t queue,
+ struct dpseci_rx_queue_attr *attr)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_RX_QUEUE,
+ cmd_flags,
+ token);
+ DPSECI_CMD_GET_RX_QUEUE(cmd, queue);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPSECI_RSP_GET_RX_QUEUE(cmd, attr);
+
+ return 0;
+}
+
+int
+dpseci_get_tx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t queue,
+ struct dpseci_tx_queue_attr *attr)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_TX_QUEUE,
+ cmd_flags,
+ token);
+ DPSECI_CMD_GET_TX_QUEUE(cmd, queue);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPSECI_RSP_GET_TX_QUEUE(cmd, attr);
+
+ return 0;
+}
+
+int
+dpseci_get_sec_attr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpseci_sec_attr *attr)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_ATTR,
+ cmd_flags,
+ token);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPSECI_RSP_GET_SEC_ATTR(cmd, attr);
+
+ return 0;
+}
+
+int
+dpseci_get_sec_counters(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpseci_sec_counters *counters)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_COUNTERS,
+ cmd_flags,
+ token);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPSECI_RSP_GET_SEC_COUNTERS(cmd, counters);
+
+ return 0;
+}
+
+int
+dpseci_get_api_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t *major_ver,
+ uint16_t *minor_ver)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_API_VERSION,
+ cmd_flags,
+ 0);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ DPSECI_RSP_GET_API_VERSION(cmd, *major_ver, *minor_ver);
+
+ return 0;
+}
diff --git a/src/seastar/dpdk/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
new file mode 100644
index 00000000..0fb47f73
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
@@ -0,0 +1,746 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __FSL_DPSECI_H
+#define __FSL_DPSECI_H
+
+/* Data Path SEC Interface API
+ * Contains initialization APIs and runtime control APIs for DPSECI
+ */
+
+struct fsl_mc_io;
+
+/**
+ * General DPSECI macros
+ */
+
+/**
+ * Maximum number of Tx/Rx priorities per DPSECI object
+ */
+#define DPSECI_PRIO_NUM 8
+
+/**
+ * All queues considered; see dpseci_set_rx_queue()
+ */
+#define DPSECI_ALL_QUEUES (uint8_t)(-1)
+
+/**
+ * dpseci_open() - Open a control session for the specified object
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpseci_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * @param mc_io Pointer to MC portal's I/O object
+ * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
+ * @param dpseci_id DPSECI unique ID
+ * @param token Returned token; use in subsequent API calls
+ *
+ * @return:
+ * - Return '0' on Success.
+ * - Return Error code otherwise.
+ */
+int
+dpseci_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpseci_id,
+ uint16_t *token);
+
+/**
+ * dpseci_close() - Close the control session of the object
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * @param mc_io Pointer to MC portal's I/O object
+ * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
+ * @param token Token of DPSECI object
+ *
+ * @return:
+ * - Return '0' on Success.
+ * - Return Error code otherwise.
+ */
+int
+dpseci_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * struct dpseci_cfg - Structure representing DPSECI configuration
+ */
+struct dpseci_cfg {
+ uint8_t num_tx_queues; /* num of queues towards the SEC */
+ uint8_t num_rx_queues; /* num of queues back from the SEC */
+ uint8_t priorities[DPSECI_PRIO_NUM];
+ /**< Priorities for the SEC hardware processing;
+ * each place in the array is the priority of the tx queue
+ * towards the SEC,
+ * valid priorities are configured with values 1-8;
+ */
+};
+
+/**
+ * dpseci_create() - Create the DPSECI object
+ * Create the DPSECI object, allocate required resources and
+ * perform required initialization.
+ *
+ * The object can be created either by declaring it in the
+ * DPL file, or by calling this function.
+ *
+ * The function accepts an authentication token of a parent
+ * container that this object should be assigned to. The token
+ * can be '0' so the object will be assigned to the default container.
+ * The newly created object can be opened with the returned
+ * object id and using the container's associated tokens and MC portals.
+ *
+ * @param mc_io Pointer to MC portal's I/O object
+ * @param dprc_token Parent container token; '0' for default container
+ * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
+ * @param cfg Configuration structure
+ * @param obj_id returned object id
+ *
+ * @return:
+ * - Return '0' on Success.
+ * - Return Error code otherwise.
+ */
+int
+dpseci_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpseci_cfg *cfg,
+ uint32_t *obj_id);
+
+/**
+ * dpseci_destroy() - Destroy the DPSECI object and release all its resources.
+ * The function accepts the authentication token of the parent container that
+ * created the object (not the one that currently owns the object). The object
+ * is searched within parent using the provided 'object_id'.
+ * All tokens to the object must be closed before calling destroy.
+ *
+ * @param mc_io Pointer to MC portal's I/O object
+ * @param dprc_token Parent container token; '0' for default container
+ * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
+ * @param object_id The object id; it must be a valid id within the
+ * container that created this object;
+ *
+ * @return:
+ * - Return '0' on Success.
+ * - Return Error code otherwise.
+ */
+int
+dpseci_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id);
+
+/**
+ * dpseci_enable() - Enable the DPSECI, allow sending and receiving frames.
+ * @param mc_io Pointer to MC portal's I/O object
+ * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
+ * @param token Token of DPSECI object
+ *
+ * @return:
+ * - Return '0' on Success.
+ * - Return Error code otherwise.
+ */
+int
+dpseci_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * dpseci_disable() - Disable the DPSECI, stop sending and receiving frames.
+ * @param mc_io Pointer to MC portal's I/O object
+ * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
+ * @param token Token of DPSECI object
+ *
+ * @return:
+ * - Return '0' on Success.
+ * - Return Error code otherwise.
+ */
+int
+dpseci_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * dpseci_is_enabled() - Check if the DPSECI is enabled.
+ * @param mc_io Pointer to MC portal's I/O object
+ * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
+ * @param token Token of DPSECI object
+ * @param en Returns '1' if object is enabled; '0' otherwise
+ *
+ * @return:
+ * - Return '0' on Success.
+ * - Return Error code otherwise.
+ */
+int
+dpseci_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en);
+
+/**
+ * dpseci_reset() - Reset the DPSECI, returns the object to initial state.
+ * @param mc_io Pointer to MC portal's I/O object
+ * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
+ * @param token Token of DPSECI object
+ *
+ * @return:
+ * - Return '0' on Success.
+ * - Return Error code otherwise.
+ */
+int
+dpseci_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * struct dpseci_irq_cfg - IRQ configuration
+ */
+struct dpseci_irq_cfg {
+ uint64_t addr;
+ /* Address that must be written to signal a message-based interrupt */
+ uint32_t val;
+ /* Value to write into irq_addr address */
+ int irq_num;
+ /* A user defined number associated with this IRQ */
+};
+
+/**
+ * dpseci_set_irq() - Set IRQ information for the DPSECI to trigger an interrupt
+ * @param mc_io Pointer to MC portal's I/O object
+ * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
+ * @param token Token of DPSECI object
+ * @param irq_index Identifies the interrupt index to configure
+ * @param irq_cfg IRQ configuration
+ *
+ * @return:
+ * - Return '0' on Success.
+ * - Return Error code otherwise.
+ */
+int
+dpseci_set_irq(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ struct dpseci_irq_cfg *irq_cfg);
+
+/**
+ * dpseci_get_irq() - Get IRQ information from the DPSECI
+ *
+ * @param mc_io Pointer to MC portal's I/O object
+ * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
+ * @param token Token of DPSECI object
+ * @param irq_index The interrupt index to configure
+ * @param type Interrupt type: 0 represents message interrupt
+ * type (both irq_addr and irq_val are valid)
+ * @param irq_cfg IRQ attributes
+ *
+ * @return:
+ * - Return '0' on Success.
+ * - Return Error code otherwise.
+ */
+int
+dpseci_get_irq(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ int *type,
+ struct dpseci_irq_cfg *irq_cfg);
+
+/**
+ * dpseci_set_irq_enable() - Set overall interrupt state.
+ * Allows GPP software to control when interrupts are generated.
+ * Each interrupt can have up to 32 causes. The enable/disable controls the
+ * overall interrupt state: if the interrupt is disabled, no causes will
+ * assert an interrupt.
+ *
+ * @param mc_io Pointer to MC portal's I/O object
+ * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
+ * @param token Token of DPSECI object
+ * @param irq_index The interrupt index to configure
+ * @param en Interrupt state - enable = 1, disable = 0
+ *
+ * @return:
+ * - Return '0' on Success.
+ * - Return Error code otherwise.
+ */
+int
+dpseci_set_irq_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ uint8_t en);
+
+/**
+ * dpseci_get_irq_enable() - Get overall interrupt state
+ * @param mc_io Pointer to MC portal's I/O object
+ * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
+ * @param token Token of DPSECI object
+ * @param irq_index The interrupt index to configure
+ * @param en Returned Interrupt state - enable = 1,
+ * disable = 0
+ *
+ * @return:
+ * - Return '0' on Success.
+ * - Return Error code otherwise.
+ */
+int
+dpseci_get_irq_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ uint8_t *en);
+
+/**
+ * dpseci_set_irq_mask() - Set interrupt mask.
+ * Every interrupt can have up to 32 causes and the interrupt model supports
+ * masking/unmasking each cause independently
+ *
+ * @param mc_io Pointer to MC portal's I/O object
+ * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
+ * @param token Token of DPSECI object
+ * @param irq_index The interrupt index to configure
+ * @param mask event mask to trigger interrupt;
+ * each bit:
+ * 0 = ignore event
+ * 1 = consider event for asserting IRQ
+ *
+ * @return:
+ * - Return '0' on Success.
+ * - Return Error code otherwise.
+ */
+int
+dpseci_set_irq_mask(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ uint32_t mask);
+
+/**
+ * dpseci_get_irq_mask() - Get interrupt mask.
+ * Every interrupt can have up to 32 causes and the interrupt model supports
+ * masking/unmasking each cause independently
+ *
+ * @param mc_io Pointer to MC portal's I/O object
+ * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
+ * @param token Token of DPSECI object
+ * @param irq_index The interrupt index to configure
+ * @param mask Returned event mask to trigger interrupt
+ *
+ * @return:
+ * - Return '0' on Success.
+ * - Return Error code otherwise.
+ */
+int
+dpseci_get_irq_mask(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ uint32_t *mask);
+
+/**
+ * dpseci_get_irq_status() - Get the current status of any pending interrupts
+ * @param mc_io Pointer to MC portal's I/O object
+ * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
+ * @param token Token of DPSECI object
+ * @param irq_index The interrupt index to configure
+ * @param status Returned interrupts status - one bit per cause:
+ * 0 = no interrupt pending
+ * 1 = interrupt pending
+ *
+ * @return:
+ * - Return '0' on Success.
+ * - Return Error code otherwise.
+ */
+int
+dpseci_get_irq_status(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ uint32_t *status);
+
+/**
+ * dpseci_clear_irq_status() - Clear a pending interrupt's status
+ * @param mc_io Pointer to MC portal's I/O object
+ * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
+ * @param token Token of DPSECI object
+ * @param irq_index The interrupt index to configure
+ * @param status bits to clear (W1C) - one bit per cause:
+ * 0 = don't change
+ * 1 = clear status bit
+ *
+ * @return:
+ * - Return '0' on Success.
+ * - Return Error code otherwise.
+ */
+int
+dpseci_clear_irq_status(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t irq_index,
+ uint32_t status);
+
+/**
+ * struct dpseci_attr - Structure representing DPSECI attributes
+ * @param id: DPSECI object ID
+ * @param num_tx_queues: number of queues towards the SEC
+ * @param num_rx_queues: number of queues back from the SEC
+ */
+struct dpseci_attr {
+ int id; /* DPSECI object ID */
+ uint8_t num_tx_queues; /* number of queues towards the SEC */
+ uint8_t num_rx_queues; /* number of queues back from the SEC */
+};
+
+/**
+ * dpseci_get_attributes() - Retrieve DPSECI attributes.
+ * @param mc_io Pointer to MC portal's I/O object
+ * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
+ * @param token Token of DPSECI object
+ * @param attr Returned object's attributes
+ *
+ * @return:
+ * - Return '0' on Success.
+ * - Return Error code otherwise.
+ */
+int
+dpseci_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpseci_attr *attr);
+
+/**
+ * enum dpseci_dest - DPSECI destination types
+ * @DPSECI_DEST_NONE: Unassigned destination; The queue is set in parked mode
+ * and does not generate FQDAN notifications; user is expected to
+ * dequeue from the queue based on polling or other user-defined
+ * method
+ * @DPSECI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
+ * notifications to the specified DPIO; user is expected to dequeue
+ * from the queue only after notification is received
+ * @DPSECI_DEST_DPCON: The queue is set in schedule mode and does not generate
+ * FQDAN notifications, but is connected to the specified DPCON
+ * object; user is expected to dequeue from the DPCON channel
+ */
+enum dpseci_dest {
+ DPSECI_DEST_NONE = 0,
+ DPSECI_DEST_DPIO = 1,
+ DPSECI_DEST_DPCON = 2
+};
+
+/**
+ * struct dpseci_dest_cfg - Structure representing DPSECI destination parameters
+ */
+struct dpseci_dest_cfg {
+ enum dpseci_dest dest_type; /* Destination type */
+ int dest_id;
+ /* Either DPIO ID or DPCON ID, depending on the destination type */
+ uint8_t priority;
+ /* Priority selection within the DPIO or DPCON channel; valid values
+ * are 0-1 or 0-7, depending on the number of priorities in that
+ * channel; not relevant for 'DPSECI_DEST_NONE' option
+ */
+};
+
+/**
+ * DPSECI queue modification options
+ */
+
+/**
+ * Select to modify the user's context associated with the queue
+ */
+#define DPSECI_QUEUE_OPT_USER_CTX 0x00000001
+
+/**
+ * Select to modify the queue's destination
+ */
+#define DPSECI_QUEUE_OPT_DEST 0x00000002
+
+/**
+ * Select to modify the queue's order preservation
+ */
+#define DPSECI_QUEUE_OPT_ORDER_PRESERVATION 0x00000004
+
+/**
+ * struct dpseci_rx_queue_cfg - DPSECI RX queue configuration
+ */
+struct dpseci_rx_queue_cfg {
+ uint32_t options;
+ /* Flags representing the suggested modifications to the queue;
+ * Use any combination of 'DPSECI_QUEUE_OPT_<X>' flags
+ */
+ int order_preservation_en;
+ /* Order preservation configuration for the rx queue;
+ * valid only if 'DPSECI_QUEUE_OPT_ORDER_PRESERVATION' is contained in
+ * 'options'
+ */
+ uint64_t user_ctx;
+ /* User context value provided in the frame descriptor of each
+ * dequeued frame;
+ * valid only if 'DPSECI_QUEUE_OPT_USER_CTX' is contained in 'options'
+ */
+ struct dpseci_dest_cfg dest_cfg;
+ /* Queue destination parameters;
+ * valid only if 'DPSECI_QUEUE_OPT_DEST' is contained in 'options'
+ */
+};
+
+/**
+ * dpseci_set_rx_queue() - Set Rx queue configuration
+ * @param mc_io Pointer to MC portal's I/O object
+ * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
+ * @param token Token of DPSECI object
+ * @param queue Select the queue relative to number of
+ * priorities configured at DPSECI creation; use
+ * DPSECI_ALL_QUEUES to configure all Rx queues
+ * identically.
+ * @param cfg Rx queue configuration
+ *
+ * @return:
+ * - Return '0' on Success.
+ * - Return Error code otherwise.
+ */
+int
+dpseci_set_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t queue,
+ const struct dpseci_rx_queue_cfg *cfg);
+
+/**
+ * struct dpseci_rx_queue_attr - Structure representing attributes of Rx queues
+ */
+struct dpseci_rx_queue_attr {
+ uint64_t user_ctx;
+ /* User context value provided in the frame descriptor of
+ * each dequeued frame
+ */
+ int order_preservation_en;
+ /* Status of the order preservation configuration on the queue */
+ struct dpseci_dest_cfg dest_cfg;
+ /* Queue destination configuration */
+ uint32_t fqid;
+ /* Virtual FQID value to be used for dequeue operations */
+};
+
+/**
+ * dpseci_get_rx_queue() - Retrieve Rx queue attributes.
+ * @param mc_io Pointer to MC portal's I/O object
+ * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
+ * @param token Token of DPSECI object
+ * @param queue Select the queue relative to number of
+ * priorities configured at DPSECI creation
+ * @param attr Returned Rx queue attributes
+ *
+ * @return:
+ * - Return '0' on Success.
+ * - Return Error code otherwise.
+ */
+int
+dpseci_get_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t queue,
+ struct dpseci_rx_queue_attr *attr);
+
+/**
+ * struct dpseci_tx_queue_attr - Structure representing attributes of Tx queues
+ */
+struct dpseci_tx_queue_attr {
+ uint32_t fqid;
+ /* Virtual FQID to be used for sending frames to SEC hardware */
+ uint8_t priority;
+ /* SEC hardware processing priority for the queue */
+};
+
+/**
+ * dpseci_get_tx_queue() - Retrieve Tx queue attributes.
+ * @param mc_io Pointer to MC portal's I/O object
+ * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
+ * @param token Token of DPSECI object
+ * @param queue Select the queue relative to number of
+ * priorities configured at DPSECI creation
+ * @param attr Returned Tx queue attributes
+ *
+ * @return:
+ * - Return '0' on Success.
+ * - Return Error code otherwise.
+ */
+int
+dpseci_get_tx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t queue,
+ struct dpseci_tx_queue_attr *attr);
+
+/**
+ * struct dpseci_sec_attr - Structure representing attributes of the SEC
+ * hardware accelerator
+ */
+
+struct dpseci_sec_attr {
+ uint16_t ip_id; /* ID for SEC */
+ uint8_t major_rev; /* Major revision number for SEC */
+ uint8_t minor_rev; /* Minor revision number for SEC */
+ uint8_t era; /* SEC Era */
+ uint8_t deco_num;
+ /* The number of copies of the DECO that are implemented in
+ * this version of SEC
+ */
+ uint8_t zuc_auth_acc_num;
+ /* The number of copies of ZUCA that are implemented in this
+ * version of SEC
+ */
+ uint8_t zuc_enc_acc_num;
+ /* The number of copies of ZUCE that are implemented in this
+ * version of SEC
+ */
+ uint8_t snow_f8_acc_num;
+ /* The number of copies of the SNOW-f8 module that are
+ * implemented in this version of SEC
+ */
+ uint8_t snow_f9_acc_num;
+ /* The number of copies of the SNOW-f9 module that are
+ * implemented in this version of SEC
+ */
+ uint8_t crc_acc_num;
+ /* The number of copies of the CRC module that are implemented
+ * in this version of SEC
+ */
+ uint8_t pk_acc_num;
+ /* The number of copies of the Public Key module that are
+ * implemented in this version of SEC
+ */
+ uint8_t kasumi_acc_num;
+ /* The number of copies of the Kasumi module that are
+ * implemented in this version of SEC
+ */
+ uint8_t rng_acc_num;
+ /* The number of copies of the Random Number Generator that are
+ * implemented in this version of SEC
+ */
+ uint8_t md_acc_num;
+ /* The number of copies of the MDHA (Hashing module) that are
+ * implemented in this version of SEC
+ */
+ uint8_t arc4_acc_num;
+ /* The number of copies of the ARC4 module that are implemented
+ * in this version of SEC
+ */
+ uint8_t des_acc_num;
+ /* The number of copies of the DES module that are implemented
+ * in this version of SEC
+ */
+ uint8_t aes_acc_num;
+ /* The number of copies of the AES module that are implemented
+ * in this version of SEC
+ */
+};
+
+/**
+ * dpseci_get_sec_attr() - Retrieve SEC accelerator attributes.
+ * @param mc_io Pointer to MC portal's I/O object
+ * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
+ * @param token Token of DPSECI object
+ * @param attr Returned SEC attributes
+ *
+ * @return:
+ * - Return '0' on Success.
+ * - Return Error code otherwise.
+ */
+int
+dpseci_get_sec_attr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpseci_sec_attr *attr);
+
+/**
+ * struct dpseci_sec_counters - Structure representing global SEC counters
+ * (not per-DPSECI counters)
+ */
+struct dpseci_sec_counters {
+ uint64_t dequeued_requests; /* Number of Requests Dequeued */
+ uint64_t ob_enc_requests; /* Number of Outbound Encrypt Requests */
+ uint64_t ib_dec_requests; /* Number of Inbound Decrypt Requests */
+ uint64_t ob_enc_bytes; /* Number of Outbound Bytes Encrypted */
+ uint64_t ob_prot_bytes; /* Number of Outbound Bytes Protected */
+ uint64_t ib_dec_bytes; /* Number of Inbound Bytes Decrypted */
+ uint64_t ib_valid_bytes; /* Number of Inbound Bytes Validated */
+};
+
+/**
+ * dpseci_get_sec_counters() - Retrieve SEC accelerator counters.
+ * @param mc_io Pointer to MC portal's I/O object
+ * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
+ * @param token Token of DPSECI object
+ * @param counters Returned SEC counters
+ *
+ * @return:
+ * - Return '0' on Success.
+ * - Return Error code otherwise.
+ */
+int
+dpseci_get_sec_counters(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpseci_sec_counters *counters);
+
+/**
+ * dpseci_get_api_version() - Get Data Path SEC Interface API version
+ * @param mc_io Pointer to MC portal's I/O object
+ * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
+ * @param major_ver Major version of data path sec API
+ * @param minor_ver Minor version of data path sec API
+ *
+ * @return:
+ * - Return '0' on Success.
+ * - Return Error code otherwise.
+ */
+int
+dpseci_get_api_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t *major_ver,
+ uint16_t *minor_ver);
+
+#endif /* __FSL_DPSECI_H */
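A hedged sketch of a typical DPSECI control-path lifecycle built from the API documented above, assuming fsl_dpseci.h and the MC I/O headers are included; the MC portal `mc_io` and `dpseci_id` are assumed to come from DPRC/container setup, and `example_dpseci_bringup` is a hypothetical helper, not part of the driver.

/* Hedged sketch: open, query, configure Rx queues, enable, then drop token. */
static int
example_dpseci_bringup(struct fsl_mc_io *mc_io, int dpseci_id)
{
	struct dpseci_attr attr = {0};
	struct dpseci_rx_queue_attr rx_attr = {0};
	struct dpseci_rx_queue_cfg rx_cfg = {0};
	uint16_t token;
	int err;

	/* Open a control session; the token is used in all later commands. */
	err = dpseci_open(mc_io, 0 /* no special cmd_flags */, dpseci_id, &token);
	if (err)
		return err;

	err = dpseci_get_attributes(mc_io, 0, token, &attr);
	if (err)
		goto close;

	/* Park all Rx queues and pass a user context back in each frame. */
	rx_cfg.options = DPSECI_QUEUE_OPT_USER_CTX | DPSECI_QUEUE_OPT_DEST;
	rx_cfg.user_ctx = 0xdeadbeef;
	rx_cfg.dest_cfg.dest_type = DPSECI_DEST_NONE;
	err = dpseci_set_rx_queue(mc_io, 0, token, DPSECI_ALL_QUEUES, &rx_cfg);
	if (err)
		goto close;

	/* Read back the virtual FQID to be used for dequeue on queue 0. */
	err = dpseci_get_rx_queue(mc_io, 0, token, 0, &rx_attr);
	if (err)
		goto close;

	err = dpseci_enable(mc_io, 0, token);

close:
	/* Closing the control session does not disable the object itself. */
	dpseci_close(mc_io, 0, token);
	return err;
}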
diff --git a/src/seastar/dpdk/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h
new file mode 100644
index 00000000..1fb18cc3
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h
@@ -0,0 +1,256 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _FSL_DPSECI_CMD_H
+#define _FSL_DPSECI_CMD_H
+
+/* DPSECI Version */
+#define DPSECI_VER_MAJOR 5
+#define DPSECI_VER_MINOR 0
+
+/* Command IDs */
+#define DPSECI_CMDID_CLOSE ((0x800 << 4) | (0x1))
+#define DPSECI_CMDID_OPEN ((0x809 << 4) | (0x1))
+#define DPSECI_CMDID_CREATE ((0x909 << 4) | (0x1))
+#define DPSECI_CMDID_DESTROY ((0x989 << 4) | (0x1))
+#define DPSECI_CMDID_GET_API_VERSION ((0xa09 << 4) | (0x1))
+
+#define DPSECI_CMDID_ENABLE ((0x002 << 4) | (0x1))
+#define DPSECI_CMDID_DISABLE ((0x003 << 4) | (0x1))
+#define DPSECI_CMDID_GET_ATTR ((0x004 << 4) | (0x1))
+#define DPSECI_CMDID_RESET ((0x005 << 4) | (0x1))
+#define DPSECI_CMDID_IS_ENABLED ((0x006 << 4) | (0x1))
+
+#define DPSECI_CMDID_SET_IRQ ((0x010 << 4) | (0x1))
+#define DPSECI_CMDID_GET_IRQ ((0x011 << 4) | (0x1))
+#define DPSECI_CMDID_SET_IRQ_ENABLE ((0x012 << 4) | (0x1))
+#define DPSECI_CMDID_GET_IRQ_ENABLE ((0x013 << 4) | (0x1))
+#define DPSECI_CMDID_SET_IRQ_MASK ((0x014 << 4) | (0x1))
+#define DPSECI_CMDID_GET_IRQ_MASK ((0x015 << 4) | (0x1))
+#define DPSECI_CMDID_GET_IRQ_STATUS ((0x016 << 4) | (0x1))
+#define DPSECI_CMDID_CLEAR_IRQ_STATUS ((0x017 << 4) | (0x1))
+
+#define DPSECI_CMDID_SET_RX_QUEUE ((0x194 << 4) | (0x1))
+#define DPSECI_CMDID_GET_RX_QUEUE ((0x196 << 4) | (0x1))
+#define DPSECI_CMDID_GET_TX_QUEUE ((0x197 << 4) | (0x1))
+#define DPSECI_CMDID_GET_SEC_ATTR ((0x198 << 4) | (0x1))
+#define DPSECI_CMDID_GET_SEC_COUNTERS ((0x199 << 4) | (0x1))
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPSECI_CMD_OPEN(cmd, dpseci_id) \
+ MC_CMD_OP(cmd, 0, 0, 32, int, dpseci_id)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPSECI_CMD_CREATE(cmd, cfg) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->priorities[0]);\
+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->priorities[1]);\
+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->priorities[2]);\
+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->priorities[3]);\
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->priorities[4]);\
+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, cfg->priorities[5]);\
+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, cfg->priorities[6]);\
+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, cfg->priorities[7]);\
+ MC_CMD_OP(cmd, 1, 0, 8, uint8_t, cfg->num_tx_queues);\
+ MC_CMD_OP(cmd, 1, 8, 8, uint8_t, cfg->num_rx_queues);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPSECI_RSP_IS_ENABLED(cmd, en) \
+ MC_RSP_OP(cmd, 0, 0, 1, int, en)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPSECI_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPSECI_CMD_GET_IRQ(cmd, irq_index) \
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPSECI_RSP_GET_IRQ(cmd, type, irq_cfg) \
+do { \
+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPSECI_CMD_SET_IRQ_ENABLE(cmd, irq_index, enable_state) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, enable_state); \
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPSECI_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPSECI_RSP_GET_IRQ_ENABLE(cmd, enable_state) \
+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, enable_state)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPSECI_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPSECI_CMD_GET_IRQ_MASK(cmd, irq_index) \
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPSECI_RSP_GET_IRQ_MASK(cmd, mask) \
+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPSECI_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPSECI_RSP_GET_IRQ_STATUS(cmd, status) \
+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPSECI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPSECI_RSP_GET_ATTR(cmd, attr) \
+do { \
+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id); \
+ MC_RSP_OP(cmd, 1, 0, 8, uint8_t, attr->num_tx_queues); \
+ MC_RSP_OP(cmd, 1, 8, 8, uint8_t, attr->num_rx_queues); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPSECI_CMD_SET_RX_QUEUE(cmd, queue, cfg) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority); \
+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, queue); \
+ MC_CMD_OP(cmd, 0, 48, 4, enum dpseci_dest, cfg->dest_cfg.dest_type); \
+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \
+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\
+ MC_CMD_OP(cmd, 2, 32, 1, int, cfg->order_preservation_en);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPSECI_CMD_GET_RX_QUEUE(cmd, queue) \
+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, queue)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPSECI_RSP_GET_RX_QUEUE(cmd, attr) \
+do { \
+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id);\
+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\
+ MC_RSP_OP(cmd, 0, 48, 4, enum dpseci_dest, attr->dest_cfg.dest_type);\
+ MC_RSP_OP(cmd, 1, 0, 8, uint64_t, attr->user_ctx);\
+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->fqid);\
+ MC_RSP_OP(cmd, 2, 32, 1, int, attr->order_preservation_en);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPSECI_CMD_GET_TX_QUEUE(cmd, queue) \
+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, queue)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPSECI_RSP_GET_TX_QUEUE(cmd, attr) \
+do { \
+ MC_RSP_OP(cmd, 0, 32, 32, uint32_t, attr->fqid);\
+ MC_RSP_OP(cmd, 1, 0, 8, uint8_t, attr->priority);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPSECI_RSP_GET_SEC_ATTR(cmd, attr) \
+do { \
+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, attr->ip_id);\
+ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, attr->major_rev);\
+ MC_RSP_OP(cmd, 0, 24, 8, uint8_t, attr->minor_rev);\
+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->era);\
+ MC_RSP_OP(cmd, 1, 0, 8, uint8_t, attr->deco_num);\
+ MC_RSP_OP(cmd, 1, 8, 8, uint8_t, attr->zuc_auth_acc_num);\
+ MC_RSP_OP(cmd, 1, 16, 8, uint8_t, attr->zuc_enc_acc_num);\
+ MC_RSP_OP(cmd, 1, 32, 8, uint8_t, attr->snow_f8_acc_num);\
+ MC_RSP_OP(cmd, 1, 40, 8, uint8_t, attr->snow_f9_acc_num);\
+ MC_RSP_OP(cmd, 1, 48, 8, uint8_t, attr->crc_acc_num);\
+ MC_RSP_OP(cmd, 2, 0, 8, uint8_t, attr->pk_acc_num);\
+ MC_RSP_OP(cmd, 2, 8, 8, uint8_t, attr->kasumi_acc_num);\
+ MC_RSP_OP(cmd, 2, 16, 8, uint8_t, attr->rng_acc_num);\
+ MC_RSP_OP(cmd, 2, 32, 8, uint8_t, attr->md_acc_num);\
+ MC_RSP_OP(cmd, 2, 40, 8, uint8_t, attr->arc4_acc_num);\
+ MC_RSP_OP(cmd, 2, 48, 8, uint8_t, attr->des_acc_num);\
+ MC_RSP_OP(cmd, 2, 56, 8, uint8_t, attr->aes_acc_num);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPSECI_RSP_GET_SEC_COUNTERS(cmd, counters) \
+do { \
+ MC_RSP_OP(cmd, 0, 0, 64, uint64_t, counters->dequeued_requests);\
+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, counters->ob_enc_requests);\
+ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, counters->ib_dec_requests);\
+ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, counters->ob_enc_bytes);\
+ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, counters->ob_prot_bytes);\
+ MC_RSP_OP(cmd, 5, 0, 64, uint64_t, counters->ib_dec_bytes);\
+ MC_RSP_OP(cmd, 6, 0, 64, uint64_t, counters->ib_valid_bytes);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPSECI_RSP_GET_API_VERSION(cmd, major, minor) \
+do { \
+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, major);\
+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, minor);\
+} while (0)
+
+#endif /* _FSL_DPSECI_CMD_H */
diff --git a/src/seastar/dpdk/drivers/crypto/dpaa2_sec/rte_pmd_dpaa2_sec_version.map b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/rte_pmd_dpaa2_sec_version.map
new file mode 100644
index 00000000..8591cc0b
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/dpaa2_sec/rte_pmd_dpaa2_sec_version.map
@@ -0,0 +1,4 @@
+DPDK_17.05 {
+
+ local: *;
+};
diff --git a/src/seastar/dpdk/drivers/crypto/kasumi/Makefile b/src/seastar/dpdk/drivers/crypto/kasumi/Makefile
new file mode 100644
index 00000000..b47cda0c
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/kasumi/Makefile
@@ -0,0 +1,62 @@
+# BSD LICENSE
+#
+# Copyright(c) 2016 Intel Corporation. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifneq ($(MAKECMDGOALS),clean)
+ifeq ($(LIBSSO_KASUMI_PATH),)
+$(error "Please define LIBSSO_KASUMI_PATH environment variable")
+endif
+endif
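+# The path must point to a build of Intel's libsso_kasumi library, e.g.
+# (illustrative path only):
+#   export LIBSSO_KASUMI_PATH=/path/to/libsso_kasumi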
+
+# library name
+LIB = librte_pmd_kasumi.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_kasumi_version.map
+
+# external library dependencies
+CFLAGS += -I$(LIBSSO_KASUMI_PATH)
+CFLAGS += -I$(LIBSSO_KASUMI_PATH)/include
+CFLAGS += -I$(LIBSSO_KASUMI_PATH)/build
+LDLIBS += -L$(LIBSSO_KASUMI_PATH)/build -lsso_kasumi
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += rte_kasumi_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += rte_kasumi_pmd_ops.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/seastar/dpdk/drivers/crypto/kasumi/rte_kasumi_pmd.c b/src/seastar/dpdk/drivers/crypto/kasumi/rte_kasumi_pmd.c
new file mode 100644
index 00000000..9da9e897
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/kasumi/rte_kasumi_pmd.c
@@ -0,0 +1,666 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_common.h>
+#include <rte_config.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_vdev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+
+#include "rte_kasumi_pmd_private.h"
+
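+/* KASUMI (3GPP F8/F9): 128-bit key, 64-bit IV/count block, 32-bit MAC-I digest. */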
+#define KASUMI_KEY_LENGTH 16
+#define KASUMI_IV_LENGTH 8
+#define KASUMI_DIGEST_LENGTH 4
+#define KASUMI_MAX_BURST 4
+#define BYTE_LEN 8
+
+/** Determine the KASUMI operation mode from the xform chain order. */
+static enum kasumi_operation
+kasumi_get_mode(const struct rte_crypto_sym_xform *xform)
+{
+ if (xform == NULL)
+ return KASUMI_OP_NOT_SUPPORTED;
+
+ if (xform->next)
+ if (xform->next->next != NULL)
+ return KASUMI_OP_NOT_SUPPORTED;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (xform->next == NULL)
+ return KASUMI_OP_ONLY_AUTH;
+ else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return KASUMI_OP_AUTH_CIPHER;
+ else
+ return KASUMI_OP_NOT_SUPPORTED;
+ }
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ if (xform->next == NULL)
+ return KASUMI_OP_ONLY_CIPHER;
+ else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return KASUMI_OP_CIPHER_AUTH;
+ else
+ return KASUMI_OP_NOT_SUPPORTED;
+ }
+
+ return KASUMI_OP_NOT_SUPPORTED;
+}
+
+
+/** Parse crypto xform chain and set private session parameters. */
+int
+kasumi_set_session_parameters(struct kasumi_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
+ enum kasumi_operation mode;
+
+ /* Select Crypto operation - hash then cipher / cipher then hash */
+ mode = kasumi_get_mode(xform);
+
+ switch (mode) {
+ case KASUMI_OP_CIPHER_AUTH:
+ auth_xform = xform->next;
+ /* Fall-through */
+ case KASUMI_OP_ONLY_CIPHER:
+ cipher_xform = xform;
+ break;
+ case KASUMI_OP_AUTH_CIPHER:
+ cipher_xform = xform->next;
+ /* Fall-through */
+ case KASUMI_OP_ONLY_AUTH:
+ auth_xform = xform;
+ break;
+ case KASUMI_OP_NOT_SUPPORTED:
+ default:
+ KASUMI_LOG_ERR("Unsupported operation chain order parameter");
+ return -EINVAL;
+ }
+
+ if (cipher_xform) {
+ /* Only KASUMI F8 supported */
+ if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_KASUMI_F8)
+ return -EINVAL;
+ /* Initialize key */
+ sso_kasumi_init_f8_key_sched(cipher_xform->cipher.key.data,
+ &sess->pKeySched_cipher);
+ }
+
+ if (auth_xform) {
+ /* Only KASUMI F9 supported */
+ if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_KASUMI_F9)
+ return -EINVAL;
+ sess->auth_op = auth_xform->auth.op;
+ /* Initialize key */
+ sso_kasumi_init_f9_key_sched(auth_xform->auth.key.data,
+ &sess->pKeySched_hash);
+ }
+
+
+ sess->op = mode;
+
+ return 0;
+}
+
+/** Get KASUMI session. */
+static struct kasumi_session *
+kasumi_get_session(struct kasumi_qp *qp, struct rte_crypto_op *op)
+{
+ struct kasumi_session *sess;
+
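+ /*
+ * Ops either reference a pre-configured session or carry an inline xform
+ * chain (session-less); in the latter case a session object is taken from
+ * the queue pair's mempool and configured from the op's xform.
+ */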
+ if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
+ if (unlikely(op->sym->session->dev_type !=
+ RTE_CRYPTODEV_KASUMI_PMD))
+ return NULL;
+
+ sess = (struct kasumi_session *)op->sym->session->_private;
+ } else {
+ struct rte_cryptodev_session *c_sess = NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&c_sess))
+ return NULL;
+
+ sess = (struct kasumi_session *)c_sess->_private;
+
+ if (unlikely(kasumi_set_session_parameters(sess,
+ op->sym->xform) != 0))
+ return NULL;
+ }
+
+ return sess;
+}
+
+/** Encrypt/decrypt mbufs with same cipher key. */
+static uint8_t
+process_kasumi_cipher_op(struct rte_crypto_op **ops,
+ struct kasumi_session *session,
+ uint8_t num_ops)
+{
+ unsigned i;
+ uint8_t processed_ops = 0;
+ uint8_t *src[num_ops], *dst[num_ops];
+ uint64_t IV[num_ops];
+ uint32_t num_bytes[num_ops];
+
+ for (i = 0; i < num_ops; i++) {
+ /* Sanity checks. */
+ if (ops[i]->sym->cipher.iv.length != KASUMI_IV_LENGTH) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ KASUMI_LOG_ERR("iv");
+ break;
+ }
+
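+ /* Cipher offset and length are specified in bits; >> 3 converts to bytes. */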
+ src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
+ (ops[i]->sym->cipher.data.offset >> 3);
+ dst[i] = ops[i]->sym->m_dst ?
+ rte_pktmbuf_mtod(ops[i]->sym->m_dst, uint8_t *) +
+ (ops[i]->sym->cipher.data.offset >> 3) :
+ rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
+ (ops[i]->sym->cipher.data.offset >> 3);
+ IV[i] = *((uint64_t *)(ops[i]->sym->cipher.iv.data));
+ num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
+
+ processed_ops++;
+ }
+
+ if (processed_ops != 0)
+ sso_kasumi_f8_n_buffer(&session->pKeySched_cipher, IV,
+ src, dst, num_bytes, processed_ops);
+
+ return processed_ops;
+}
+
+/** Encrypt/decrypt mbuf (bit level function). */
+static uint8_t
+process_kasumi_cipher_op_bit(struct rte_crypto_op *op,
+ struct kasumi_session *session)
+{
+ uint8_t *src, *dst;
+ uint64_t IV;
+ uint32_t length_in_bits, offset_in_bits;
+
+ /* Sanity checks. */
+ if (unlikely(op->sym->cipher.iv.length != KASUMI_IV_LENGTH)) {
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ KASUMI_LOG_ERR("iv");
+ return 0;
+ }
+
+ offset_in_bits = op->sym->cipher.data.offset;
+ src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
+ if (op->sym->m_dst == NULL) {
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ KASUMI_LOG_ERR("bit-level in-place not supported\n");
+ return 0;
+ }
+ dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
+ IV = *((uint64_t *)(op->sym->cipher.iv.data));
+ length_in_bits = op->sym->cipher.data.length;
+
+ sso_kasumi_f8_1_buffer_bit(&session->pKeySched_cipher, IV,
+ src, dst, length_in_bits, offset_in_bits);
+
+ return 1;
+}
+
+/** Generate/verify hash from mbufs with same hash key. */
+static int
+process_kasumi_hash_op(struct rte_crypto_op **ops,
+ struct kasumi_session *session,
+ uint8_t num_ops)
+{
+ unsigned i;
+ uint8_t processed_ops = 0;
+ uint8_t *src, *dst;
+ uint32_t length_in_bits;
+ uint32_t num_bytes;
+ uint32_t shift_bits;
+ uint64_t IV;
+ uint8_t direction;
+
+ for (i = 0; i < num_ops; i++) {
+ if (unlikely(ops[i]->sym->auth.aad.length != KASUMI_IV_LENGTH)) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ KASUMI_LOG_ERR("aad");
+ break;
+ }
+
+ if (unlikely(ops[i]->sym->auth.digest.length != KASUMI_DIGEST_LENGTH)) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ KASUMI_LOG_ERR("digest");
+ break;
+ }
+
+ /* Data must be byte aligned */
+ if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ KASUMI_LOG_ERR("offset");
+ break;
+ }
+
+ length_in_bits = ops[i]->sym->auth.data.length;
+
+ src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
+ (ops[i]->sym->auth.data.offset >> 3);
+ /* IV from AAD */
+ IV = *((uint64_t *)(ops[i]->sym->auth.aad.data));
+ /* Direction from next bit after end of message */
+ num_bytes = (length_in_bits >> 3) + 1;
+ shift_bits = (BYTE_LEN - 1 - length_in_bits) % BYTE_LEN;
+ direction = (src[num_bytes - 1] >> shift_bits) & 0x01;
+
+ if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
+ dst = (uint8_t *)rte_pktmbuf_append(ops[i]->sym->m_src,
+ ops[i]->sym->auth.digest.length);
+
+ sso_kasumi_f9_1_buffer_user(&session->pKeySched_hash,
+ IV, src,
+ length_in_bits, dst, direction);
+ /* Verify digest. */
+ if (memcmp(dst, ops[i]->sym->auth.digest.data,
+ ops[i]->sym->auth.digest.length) != 0)
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+
+ /* Trim area used for digest from mbuf. */
+ rte_pktmbuf_trim(ops[i]->sym->m_src,
+ ops[i]->sym->auth.digest.length);
+ } else {
+ dst = ops[i]->sym->auth.digest.data;
+
+ sso_kasumi_f9_1_buffer_user(&session->pKeySched_hash,
+ IV, src,
+ length_in_bits, dst, direction);
+ }
+ processed_ops++;
+ }
+
+ return processed_ops;
+}
+
+/** Process a batch of crypto ops which share the same session. */
+static int
+process_ops(struct rte_crypto_op **ops, struct kasumi_session *session,
+ struct kasumi_qp *qp, uint8_t num_ops,
+ uint16_t *accumulated_enqueued_ops)
+{
+ unsigned i;
+ unsigned enqueued_ops, processed_ops;
+
+ switch (session->op) {
+ case KASUMI_OP_ONLY_CIPHER:
+ processed_ops = process_kasumi_cipher_op(ops,
+ session, num_ops);
+ break;
+ case KASUMI_OP_ONLY_AUTH:
+ processed_ops = process_kasumi_hash_op(ops, session,
+ num_ops);
+ break;
+ case KASUMI_OP_CIPHER_AUTH:
+ processed_ops = process_kasumi_cipher_op(ops, session,
+ num_ops);
+ process_kasumi_hash_op(ops, session, processed_ops);
+ break;
+ case KASUMI_OP_AUTH_CIPHER:
+ processed_ops = process_kasumi_hash_op(ops, session,
+ num_ops);
+ process_kasumi_cipher_op(ops, session, processed_ops);
+ break;
+ default:
+ /* Operation not supported. */
+ processed_ops = 0;
+ }
+
+ for (i = 0; i < num_ops; i++) {
+ /*
+ * If there was no error/authentication failure,
+ * change status to successful.
+ */
+ if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ /* Free session if a session-less crypto op. */
+ if (ops[i]->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+ rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
+ ops[i]->sym->session = NULL;
+ }
+ }
+
+ enqueued_ops = rte_ring_enqueue_burst(qp->processed_ops,
+ (void **)ops, processed_ops, NULL);
+ qp->qp_stats.enqueued_count += enqueued_ops;
+ *accumulated_enqueued_ops += enqueued_ops;
+
+ return enqueued_ops;
+}
+
+/** Process a crypto op with length/offset in bits. */
+static int
+process_op_bit(struct rte_crypto_op *op, struct kasumi_session *session,
+ struct kasumi_qp *qp, uint16_t *accumulated_enqueued_ops)
+{
+ unsigned enqueued_op, processed_op;
+
+ switch (session->op) {
+ case KASUMI_OP_ONLY_CIPHER:
+ processed_op = process_kasumi_cipher_op_bit(op,
+ session);
+ break;
+ case KASUMI_OP_ONLY_AUTH:
+ processed_op = process_kasumi_hash_op(&op, session, 1);
+ break;
+ case KASUMI_OP_CIPHER_AUTH:
+ processed_op = process_kasumi_cipher_op_bit(op, session);
+ if (processed_op == 1)
+ process_kasumi_hash_op(&op, session, 1);
+ break;
+ case KASUMI_OP_AUTH_CIPHER:
+ processed_op = process_kasumi_hash_op(&op, session, 1);
+ if (processed_op == 1)
+ process_kasumi_cipher_op_bit(op, session);
+ break;
+ default:
+ /* Operation not supported. */
+ processed_op = 0;
+ }
+
+ /*
+ * If there was no error/authentication failure,
+ * change status to successful.
+ */
+ if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+ /* Free session if a session-less crypto op. */
+ if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+ rte_mempool_put(qp->sess_mp, op->sym->session);
+ op->sym->session = NULL;
+ }
+
+ enqueued_op = rte_ring_enqueue_burst(qp->processed_ops, (void **)&op,
+ processed_op, NULL);
+ qp->qp_stats.enqueued_count += enqueued_op;
+ *accumulated_enqueued_ops += enqueued_op;
+
+ return enqueued_op;
+}
+
+static uint16_t
+kasumi_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_crypto_op *c_ops[nb_ops];
+ struct rte_crypto_op *curr_c_op;
+
+ struct kasumi_session *prev_sess = NULL, *curr_sess = NULL;
+ struct kasumi_qp *qp = queue_pair;
+ unsigned i;
+ uint8_t burst_size = 0;
+ uint16_t enqueued_ops = 0;
+ uint8_t processed_ops;
+
+ for (i = 0; i < nb_ops; i++) {
+ curr_c_op = ops[i];
+
+#ifdef RTE_LIBRTE_PMD_KASUMI_DEBUG
+ if (!rte_pktmbuf_is_contiguous(curr_c_op->sym->m_src) ||
+ (curr_c_op->sym->m_dst != NULL &&
+ !rte_pktmbuf_is_contiguous(
+ curr_c_op->sym->m_dst))) {
+ KASUMI_LOG_ERR("PMD supports only contiguous mbufs, "
+ "op (%p) provides noncontiguous mbuf as "
+ "source/destination buffer.\n", curr_c_op);
+ curr_c_op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ break;
+ }
+#endif
+
+ /* Set status as enqueued (not processed yet) by default. */
+ curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+ curr_sess = kasumi_get_session(qp, curr_c_op);
+ if (unlikely(curr_sess == NULL ||
+ curr_sess->op == KASUMI_OP_NOT_SUPPORTED)) {
+ curr_c_op->status =
+ RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ break;
+ }
+
+ /* If length/offset is at bit-level, process this buffer alone. */
+ if (((curr_c_op->sym->cipher.data.length % BYTE_LEN) != 0)
+ || ((ops[i]->sym->cipher.data.offset
+ % BYTE_LEN) != 0)) {
+ /* Process the ops of the previous session. */
+ if (prev_sess != NULL) {
+ processed_ops = process_ops(c_ops, prev_sess,
+ qp, burst_size, &enqueued_ops);
+ if (processed_ops < burst_size) {
+ burst_size = 0;
+ break;
+ }
+
+ burst_size = 0;
+ prev_sess = NULL;
+ }
+
+ processed_ops = process_op_bit(curr_c_op, curr_sess,
+ qp, &enqueued_ops);
+ if (processed_ops != 1)
+ break;
+
+ continue;
+ }
+
+ /* Batch ops that share the same session. */
+ if (prev_sess == NULL) {
+ prev_sess = curr_sess;
+ c_ops[burst_size++] = curr_c_op;
+ } else if (curr_sess == prev_sess) {
+ c_ops[burst_size++] = curr_c_op;
+ /*
+ * When there are enough ops to process in a batch,
+ * process them, and start a new batch.
+ */
+ if (burst_size == KASUMI_MAX_BURST) {
+ processed_ops = process_ops(c_ops, prev_sess,
+ qp, burst_size, &enqueued_ops);
+ if (processed_ops < burst_size) {
+ burst_size = 0;
+ break;
+ }
+
+ burst_size = 0;
+ prev_sess = NULL;
+ }
+ } else {
+ /*
+ * Different session, process the ops
+ * of the previous session.
+ */
+ processed_ops = process_ops(c_ops, prev_sess,
+ qp, burst_size, &enqueued_ops);
+ if (processed_ops < burst_size) {
+ burst_size = 0;
+ break;
+ }
+
+ burst_size = 0;
+ prev_sess = curr_sess;
+
+ c_ops[burst_size++] = curr_c_op;
+ }
+ }
+
+ if (burst_size != 0) {
+ /* Process the crypto ops of the last session. */
+ processed_ops = process_ops(c_ops, prev_sess,
+ qp, burst_size, &enqueued_ops);
+ }
+
+ qp->qp_stats.enqueue_err_count += nb_ops - enqueued_ops;
+ return enqueued_ops;
+}
+
+static uint16_t
+kasumi_pmd_dequeue_burst(void *queue_pair,
+ struct rte_crypto_op **c_ops, uint16_t nb_ops)
+{
+ struct kasumi_qp *qp = queue_pair;
+
+ unsigned nb_dequeued;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
+ (void **)c_ops, nb_ops, NULL);
+ qp->qp_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
+
+static int cryptodev_kasumi_remove(struct rte_vdev_device *vdev);
+
+static int
+cryptodev_kasumi_create(const char *name,
+ struct rte_vdev_device *vdev,
+ struct rte_crypto_vdev_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ struct kasumi_private *internals;
+ uint64_t cpu_flags = 0;
+
+ if (init_params->name[0] == '\0')
+ snprintf(init_params->name, sizeof(init_params->name),
+ "%s", name);
+
+ /* Check CPU for supported vector instruction set */
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
+ cpu_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
+ else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
+ cpu_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
+ else {
+ KASUMI_LOG_ERR("Vector instructions are not supported by CPU");
+ return -EFAULT;
+ }
+
+ dev = rte_cryptodev_pmd_virtual_dev_init(init_params->name,
+ sizeof(struct kasumi_private), init_params->socket_id);
+ if (dev == NULL) {
+ KASUMI_LOG_ERR("failed to create cryptodev vdev");
+ goto init_error;
+ }
+
+ dev->dev_type = RTE_CRYPTODEV_KASUMI_PMD;
+ dev->dev_ops = rte_kasumi_pmd_ops;
+
+ /* Register RX/TX burst functions for data path. */
+ dev->dequeue_burst = kasumi_pmd_dequeue_burst;
+ dev->enqueue_burst = kasumi_pmd_enqueue_burst;
+
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ cpu_flags;
+
+ internals = dev->data->dev_private;
+
+ internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
+ internals->max_nb_sessions = init_params->max_nb_sessions;
+
+ return 0;
+init_error:
+ KASUMI_LOG_ERR("driver %s: cryptodev_kasumi_create failed",
+ init_params->name);
+
+ cryptodev_kasumi_remove(vdev);
+ return -EFAULT;
+}
+
+static int
+cryptodev_kasumi_probe(struct rte_vdev_device *vdev)
+{
+ struct rte_crypto_vdev_init_params init_params = {
+ RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
+ RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+ rte_socket_id(),
+ {0}
+ };
+ const char *name;
+ const char *input_args;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+ input_args = rte_vdev_device_args(vdev);
+
+ rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
+
+ RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
+ init_params.socket_id);
+ if (init_params.name[0] != '\0')
+ RTE_LOG(INFO, PMD, " User defined name = %s\n",
+ init_params.name);
+ RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
+ init_params.max_nb_queue_pairs);
+ RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
+ init_params.max_nb_sessions);
+
+ return cryptodev_kasumi_create(name, vdev, &init_params);
+}
+
+static int
+cryptodev_kasumi_remove(struct rte_vdev_device *vdev)
+{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ RTE_LOG(INFO, PMD, "Closing KASUMI crypto device %s"
+ " on numa socket %u\n",
+ name, rte_socket_id());
+
+ return 0;
+}
+
+static struct rte_vdev_driver cryptodev_kasumi_pmd_drv = {
+ .probe = cryptodev_kasumi_probe,
+ .remove = cryptodev_kasumi_remove
+};
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_KASUMI_PMD, cryptodev_kasumi_pmd_drv);
+RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_KASUMI_PMD, cryptodev_kasumi_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_KASUMI_PMD,
+ "max_nb_queue_pairs=<int> "
+ "max_nb_sessions=<int> "
+ "socket_id=<int>");
diff --git a/src/seastar/dpdk/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c b/src/seastar/dpdk/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c
new file mode 100644
index 00000000..62ebdbd2
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c
@@ -0,0 +1,345 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "rte_kasumi_pmd_private.h"
+
+static const struct rte_cryptodev_capabilities kasumi_pmd_capabilities[] = {
+ { /* KASUMI (F9) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_KASUMI_F9,
+ .block_size = 8,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 4,
+ .max = 4,
+ .increment = 0
+ },
+ .aad_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* KASUMI (F8) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_KASUMI_F8,
+ .block_size = 8,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+/** Configure device */
+static int
+kasumi_pmd_config(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused struct rte_cryptodev_config *config)
+{
+ return 0;
+}
+
+/** Start device */
+static int
+kasumi_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+kasumi_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+static int
+kasumi_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+
+/** Get device statistics */
+static void
+kasumi_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct kasumi_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->qp_stats.enqueued_count;
+ stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+kasumi_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct kasumi_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ }
+}
+
+
+/** Get device info */
+static void
+kasumi_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct kasumi_private *internals = dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->dev_type = dev->dev_type;
+ dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
+ dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = kasumi_pmd_capabilities;
+ }
+}
+
+/** Release queue pair */
+static int
+kasumi_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ struct kasumi_qp *qp = dev->data->queue_pairs[qp_id];
+
+ if (qp != NULL) {
+ rte_ring_free(qp->processed_ops);
+ rte_free(qp);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+ return 0;
+}
+
+/** Set a unique name for the queue pair based on the device id and queue pair id. */
+static int
+kasumi_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+ struct kasumi_qp *qp)
+{
+ unsigned n = snprintf(qp->name, sizeof(qp->name),
+ "kasumi_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+ if (n >= sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+/** Create a ring to place processed ops on */
+static struct rte_ring *
+kasumi_pmd_qp_create_processed_ops_ring(struct kasumi_qp *qp,
+ unsigned ring_size, int socket_id)
+{
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(qp->name);
+ if (r) {
+ if (rte_ring_get_size(r) == ring_size) {
+ KASUMI_LOG_INFO("Reusing existing ring %s"
+ " for processed packets",
+ qp->name);
+ return r;
+ }
+
+ KASUMI_LOG_ERR("Unable to reuse existing ring %s"
+ " for processed packets",
+ qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+/** Setup a queue pair */
+static int
+kasumi_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id)
+{
+ struct kasumi_qp *qp = NULL;
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ kasumi_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("KASUMI PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL)
+ return (-ENOMEM);
+
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ if (kasumi_pmd_qp_set_unique_name(dev, qp))
+ goto qp_setup_cleanup;
+
+ qp->processed_ops = kasumi_pmd_qp_create_processed_ops_ring(qp,
+ qp_conf->nb_descriptors, socket_id);
+ if (qp->processed_ops == NULL)
+ goto qp_setup_cleanup;
+
+ qp->sess_mp = dev->data->session_pool;
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+
+ return 0;
+
+qp_setup_cleanup:
+ rte_free(qp);
+
+ return -1;
+}
+
+/** Start queue pair */
+static int
+kasumi_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ return -ENOTSUP;
+}
+
+/** Stop queue pair */
+static int
+kasumi_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ return -ENOTSUP;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+kasumi_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the KASUMI session structure */
+static unsigned
+kasumi_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct kasumi_session);
+}
+
+/** Configure a KASUMI session from a crypto xform chain */
+static void *
+kasumi_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_sym_xform *xform, void *sess)
+{
+ if (unlikely(sess == NULL)) {
+ KASUMI_LOG_ERR("invalid session struct");
+ return NULL;
+ }
+
+ if (kasumi_set_session_parameters(sess, xform) != 0) {
+ KASUMI_LOG_ERR("failed configure session parameters");
+ return NULL;
+ }
+
+ return sess;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+kasumi_pmd_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
+{
+ /*
+ * Currently we reset the whole data structure; it should be investigated
+ * whether a more selective reset of the key material would be more performant.
+ */
+ if (sess)
+ memset(sess, 0, sizeof(struct kasumi_session));
+}
+
+struct rte_cryptodev_ops kasumi_pmd_ops = {
+ .dev_configure = kasumi_pmd_config,
+ .dev_start = kasumi_pmd_start,
+ .dev_stop = kasumi_pmd_stop,
+ .dev_close = kasumi_pmd_close,
+
+ .stats_get = kasumi_pmd_stats_get,
+ .stats_reset = kasumi_pmd_stats_reset,
+
+ .dev_infos_get = kasumi_pmd_info_get,
+
+ .queue_pair_setup = kasumi_pmd_qp_setup,
+ .queue_pair_release = kasumi_pmd_qp_release,
+ .queue_pair_start = kasumi_pmd_qp_start,
+ .queue_pair_stop = kasumi_pmd_qp_stop,
+ .queue_pair_count = kasumi_pmd_qp_count,
+
+ .session_get_size = kasumi_pmd_session_get_size,
+ .session_configure = kasumi_pmd_session_configure,
+ .session_clear = kasumi_pmd_session_clear
+};
+
+struct rte_cryptodev_ops *rte_kasumi_pmd_ops = &kasumi_pmd_ops;
diff --git a/src/seastar/dpdk/drivers/crypto/kasumi/rte_kasumi_pmd_private.h b/src/seastar/dpdk/drivers/crypto/kasumi/rte_kasumi_pmd_private.h
new file mode 100644
index 00000000..fb586caa
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/kasumi/rte_kasumi_pmd_private.h
@@ -0,0 +1,106 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_KASUMI_PMD_PRIVATE_H_
+#define _RTE_KASUMI_PMD_PRIVATE_H_
+
+#include <sso_kasumi.h>
+
+#define KASUMI_LOG_ERR(fmt, args...) \
+ RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_KASUMI_PMD), \
+ __func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_KASUMI_DEBUG
+#define KASUMI_LOG_INFO(fmt, args...) \
+ RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_KASUMI_PMD), \
+ __func__, __LINE__, ## args)
+
+#define KASUMI_LOG_DBG(fmt, args...) \
+ RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_KASUMI_PMD), \
+ __func__, __LINE__, ## args)
+#else
+#define KASUMI_LOG_INFO(fmt, args...)
+#define KASUMI_LOG_DBG(fmt, args...)
+#endif
+
+/** private data structure for each virtual KASUMI device */
+struct kasumi_private {
+ unsigned max_nb_queue_pairs;
+ /**< Max number of queue pairs supported by device */
+ unsigned max_nb_sessions;
+ /**< Max number of sessions supported by device */
+};
+
+/** KASUMI buffer queue pair */
+struct kasumi_qp {
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_CRYPTODEV_NAME_LEN];
+ /**< Unique Queue Pair Name */
+ struct rte_ring *processed_ops;
+ /**< Ring for placing processed ops */
+ struct rte_mempool *sess_mp;
+ /**< Session Mempool */
+ struct rte_cryptodev_stats qp_stats;
+ /**< Queue pair statistics */
+} __rte_cache_aligned;
+
+enum kasumi_operation {
+ KASUMI_OP_ONLY_CIPHER,
+ KASUMI_OP_ONLY_AUTH,
+ KASUMI_OP_CIPHER_AUTH,
+ KASUMI_OP_AUTH_CIPHER,
+ KASUMI_OP_NOT_SUPPORTED
+};
+
+/** KASUMI private session structure */
+struct kasumi_session {
+ /* Keys have to be 16-byte aligned */
+ sso_kasumi_key_sched_t pKeySched_cipher;
+ sso_kasumi_key_sched_t pKeySched_hash;
+ enum kasumi_operation op;
+ enum rte_crypto_auth_operation auth_op;
+} __rte_cache_aligned;
+
+
+int
+kasumi_set_session_parameters(struct kasumi_session *sess,
+ const struct rte_crypto_sym_xform *xform);
+
+
+/** device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_kasumi_pmd_ops;
+
+#endif /* _RTE_KASUMI_PMD_PRIVATE_H_ */
diff --git a/src/seastar/dpdk/drivers/crypto/kasumi/rte_pmd_kasumi_version.map b/src/seastar/dpdk/drivers/crypto/kasumi/rte_pmd_kasumi_version.map
new file mode 100644
index 00000000..8ffeca93
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/kasumi/rte_pmd_kasumi_version.map
@@ -0,0 +1,3 @@
+DPDK_16.07 {
+ local: *;
+};
diff --git a/src/seastar/dpdk/drivers/crypto/null/Makefile b/src/seastar/dpdk/drivers/crypto/null/Makefile
new file mode 100644
index 00000000..bc2724b3
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/null/Makefile
@@ -0,0 +1,54 @@
+# BSD LICENSE
+#
+# Copyright(c) 2016 Intel Corporation. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+
+# library name
+LIB = librte_pmd_null_crypto.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_null_crypto_version.map
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += null_crypto_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += null_crypto_pmd_ops.c
+
+# export include files
+SYMLINK-y-include +=
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/seastar/dpdk/drivers/crypto/null/null_crypto_pmd.c b/src/seastar/dpdk/drivers/crypto/null/null_crypto_pmd.c
new file mode 100644
index 00000000..023450a3
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/null/null_crypto_pmd.c
@@ -0,0 +1,270 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_common.h>
+#include <rte_config.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_vdev.h>
+#include <rte_malloc.h>
+
+#include "null_crypto_pmd_private.h"
+
+/** verify and set session parameters */
+int
+null_crypto_set_session_parameters(
+ struct null_crypto_session *sess __rte_unused,
+ const struct rte_crypto_sym_xform *xform)
+{
+ if (xform == NULL) {
+ return -1;
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next == NULL) {
+ /* Authentication Only */
+ if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
+ return 0;
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ /* Authentication then Cipher */
+ if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL &&
+ xform->next->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
+ return 0;
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next == NULL) {
+ /* Cipher Only */
+ if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
+ return 0;
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ /* Cipher then Authentication */
+ if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL &&
+ xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
+ return 0;
+ }
+
+ return -1;
+}
+
+/** Process crypto operation for mbuf */
+static int
+process_op(const struct null_crypto_qp *qp, struct rte_crypto_op *op,
+ struct null_crypto_session *sess __rte_unused)
+{
+ /* set status as successful by default */
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+ /*
+ * If the crypto session and operation are valid, just enqueue the packet
+ * in the processed ring.
+ */
+ return rte_ring_enqueue(qp->processed_pkts, (void *)op);
+}
+
+static struct null_crypto_session *
+get_session(struct null_crypto_qp *qp, struct rte_crypto_sym_op *op)
+{
+ struct null_crypto_session *sess;
+
+ if (op->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
+ if (unlikely(op->session == NULL ||
+ op->session->dev_type != RTE_CRYPTODEV_NULL_PMD))
+ return NULL;
+
+ sess = (struct null_crypto_session *)op->session->_private;
+ } else {
+ struct rte_cryptodev_session *c_sess = NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&c_sess))
+ return NULL;
+
+ sess = (struct null_crypto_session *)c_sess->_private;
+
+ if (null_crypto_set_session_parameters(sess, op->xform) != 0)
+ return NULL;
+ }
+
+ return sess;
+}
+
+/** Enqueue burst */
+static uint16_t
+null_crypto_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct null_crypto_session *sess;
+ struct null_crypto_qp *qp = queue_pair;
+
+ int i, retval;
+
+ for (i = 0; i < nb_ops; i++) {
+ sess = get_session(qp, ops[i]->sym);
+ if (unlikely(sess == NULL))
+ goto enqueue_err;
+
+ retval = process_op(qp, ops[i], sess);
+ if (unlikely(retval < 0))
+ goto enqueue_err;
+ }
+
+ qp->qp_stats.enqueued_count += i;
+ return i;
+
+enqueue_err:
+ if (ops[i])
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+
+ qp->qp_stats.enqueue_err_count++;
+ return i;
+}
+
+/** Dequeue burst */
+static uint16_t
+null_crypto_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct null_crypto_qp *qp = queue_pair;
+
+ unsigned nb_dequeued;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
+ (void **)ops, nb_ops, NULL);
+ qp->qp_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
+
+static int cryptodev_null_remove(const char *name);
+
+/** Create crypto device */
+static int
+cryptodev_null_create(const char *name,
+ struct rte_crypto_vdev_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ struct null_crypto_private *internals;
+
+ if (init_params->name[0] == '\0')
+ snprintf(init_params->name, sizeof(init_params->name),
+ "%s", name);
+
+ dev = rte_cryptodev_pmd_virtual_dev_init(init_params->name,
+ sizeof(struct null_crypto_private),
+ init_params->socket_id);
+ if (dev == NULL) {
+ NULL_CRYPTO_LOG_ERR("failed to create cryptodev vdev");
+ goto init_error;
+ }
+
+ dev->dev_type = RTE_CRYPTODEV_NULL_PMD;
+ dev->dev_ops = null_crypto_pmd_ops;
+
+ /* register rx/tx burst functions for data path */
+ dev->dequeue_burst = null_crypto_pmd_dequeue_burst;
+ dev->enqueue_burst = null_crypto_pmd_enqueue_burst;
+
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
+
+ internals = dev->data->dev_private;
+
+ internals->max_nb_qpairs = init_params->max_nb_queue_pairs;
+ internals->max_nb_sessions = init_params->max_nb_sessions;
+
+ return 0;
+
+init_error:
+ NULL_CRYPTO_LOG_ERR("driver %s: cryptodev_null_create failed",
+ init_params->name);
+ cryptodev_null_remove(init_params->name);
+
+ return -EFAULT;
+}
+
+/** Initialise null crypto device */
+static int
+cryptodev_null_probe(struct rte_vdev_device *dev)
+{
+ struct rte_crypto_vdev_init_params init_params = {
+ RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
+ RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+ rte_socket_id(),
+ {0}
+ };
+ const char *name;
+
+ name = rte_vdev_device_name(dev);
+ if (name == NULL)
+ return -EINVAL;
+
+ RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n",
+ name, init_params.socket_id);
+ if (init_params.name[0] != '\0')
+ RTE_LOG(INFO, PMD, " User defined name = %s\n",
+ init_params.name);
+ RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
+ init_params.max_nb_queue_pairs);
+ RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
+ init_params.max_nb_sessions);
+
+ return cryptodev_null_create(name, &init_params);
+}
+
+/** Uninitialise null crypto device */
+static int
+cryptodev_null_remove(const char *name)
+{
+ if (name == NULL)
+ return -EINVAL;
+
+ RTE_LOG(INFO, PMD, "Closing null crypto device %s on numa socket %u\n",
+ name, rte_socket_id());
+
+ return 0;
+}
+
+static int
+cryptodev_null_remove_dev(struct rte_vdev_device *dev)
+{
+ return cryptodev_null_remove(rte_vdev_device_name(dev));
+}
+
+static struct rte_vdev_driver cryptodev_null_pmd_drv = {
+ .probe = cryptodev_null_probe,
+ .remove = cryptodev_null_remove_dev,
+};
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_NULL_PMD, cryptodev_null_pmd_drv);
+RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_NULL_PMD, cryptodev_null_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_NULL_PMD,
+ "max_nb_queue_pairs=<int> "
+ "max_nb_sessions=<int> "
+ "socket_id=<int>");
diff --git a/src/seastar/dpdk/drivers/crypto/null/null_crypto_pmd_ops.c b/src/seastar/dpdk/drivers/crypto/null/null_crypto_pmd_ops.c
new file mode 100644
index 00000000..12c946c9
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/null/null_crypto_pmd_ops.c
@@ -0,0 +1,356 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "null_crypto_pmd_private.h"
+
+static const struct rte_cryptodev_capabilities null_crypto_pmd_capabilities[] = {
+ { /* NULL (AUTH) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_NULL,
+ .block_size = 1,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, },
+ }, },
+ },
+ { /* NULL (CIPHER) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_NULL,
+ .block_size = 1,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ }
+ }, },
+ }, }
+ },
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+/** Configure device */
+static int
+null_crypto_pmd_config(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused struct rte_cryptodev_config *config)
+{
+ return 0;
+}
+
+/** Start device */
+static int
+null_crypto_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+null_crypto_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+static int
+null_crypto_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Get device statistics */
+static void
+null_crypto_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct null_crypto_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->qp_stats.enqueued_count;
+ stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+null_crypto_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct null_crypto_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ }
+}
+
+
+/** Get device info */
+static void
+null_crypto_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct null_crypto_private *internals = dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->dev_type = dev->dev_type;
+ dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
+ dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = null_crypto_pmd_capabilities;
+ }
+}
+
+/** Release queue pair */
+static int
+null_crypto_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ if (dev->data->queue_pairs[qp_id] != NULL) {
+ rte_free(dev->data->queue_pairs[qp_id]);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+ return 0;
+}
+
+/** Set a unique name for the queue pair based on the device id and queue pair id. */
+static int
+null_crypto_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+ struct null_crypto_qp *qp)
+{
+ unsigned n = snprintf(qp->name, sizeof(qp->name),
+ "null_crypto_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+ if (n >= sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+/** Create a ring to place processed packets on */
+static struct rte_ring *
+null_crypto_pmd_qp_create_processed_pkts_ring(struct null_crypto_qp *qp,
+ unsigned ring_size, int socket_id)
+{
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(qp->name);
+ if (r) {
+ if (rte_ring_get_size(r) >= ring_size) {
+ NULL_CRYPTO_LOG_INFO(
+ "Reusing existing ring %s for processed packets",
+ qp->name);
+ return r;
+ }
+
+ NULL_CRYPTO_LOG_INFO(
+ "Unable to reuse existing ring %s for processed packets",
+ qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+/** Setup a queue pair */
+static int
+null_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id)
+{
+ struct null_crypto_private *internals = dev->data->dev_private;
+ struct null_crypto_qp *qp;
+ int retval;
+
+ if (qp_id >= internals->max_nb_qpairs) {
+ NULL_CRYPTO_LOG_ERR("Invalid qp_id %u, greater than maximum "
+ "number of queue pairs supported (%u).",
+ qp_id, internals->max_nb_qpairs);
+ return (-EINVAL);
+ }
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ null_crypto_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("Null Crypto PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL) {
+ NULL_CRYPTO_LOG_ERR("Failed to allocate queue pair memory");
+ return (-ENOMEM);
+ }
+
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ retval = null_crypto_pmd_qp_set_unique_name(dev, qp);
+ if (retval) {
+ NULL_CRYPTO_LOG_ERR("Failed to create unique name for null "
+ "crypto device");
+ goto qp_setup_cleanup;
+ }
+
+ qp->processed_pkts = null_crypto_pmd_qp_create_processed_pkts_ring(qp,
+ qp_conf->nb_descriptors, socket_id);
+ if (qp->processed_pkts == NULL) {
+ NULL_CRYPTO_LOG_ERR("Failed to create unique name for null "
+ "crypto device");
+ goto qp_setup_cleanup;
+ }
+
+ qp->sess_mp = dev->data->session_pool;
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+
+ return 0;
+
+qp_setup_cleanup:
+ if (qp)
+ rte_free(qp);
+
+ return -1;
+}
+
+/** Start queue pair */
+static int
+null_crypto_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ return -ENOTSUP;
+}
+
+/** Stop queue pair */
+static int
+null_crypto_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ return -ENOTSUP;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+null_crypto_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the NULL crypto session structure */
+static unsigned
+null_crypto_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct null_crypto_session);
+}
+
+/** Configure a null crypto session from a crypto xform chain */
+static void *
+null_crypto_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_sym_xform *xform, void *sess)
+{
+ int retval;
+
+ if (unlikely(sess == NULL)) {
+ NULL_CRYPTO_LOG_ERR("invalid session struct");
+ return NULL;
+ }
+ retval = null_crypto_set_session_parameters(
+ (struct null_crypto_session *)sess, xform);
+ if (retval != 0) {
+ NULL_CRYPTO_LOG_ERR("failed configure session parameters");
+ return NULL;
+ }
+
+ return sess;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+null_crypto_pmd_session_clear(struct rte_cryptodev *dev __rte_unused,
+ void *sess)
+{
+ if (sess)
+ memset(sess, 0, sizeof(struct null_crypto_session));
+}
+
+struct rte_cryptodev_ops pmd_ops = {
+ .dev_configure = null_crypto_pmd_config,
+ .dev_start = null_crypto_pmd_start,
+ .dev_stop = null_crypto_pmd_stop,
+ .dev_close = null_crypto_pmd_close,
+
+ .stats_get = null_crypto_pmd_stats_get,
+ .stats_reset = null_crypto_pmd_stats_reset,
+
+ .dev_infos_get = null_crypto_pmd_info_get,
+
+ .queue_pair_setup = null_crypto_pmd_qp_setup,
+ .queue_pair_release = null_crypto_pmd_qp_release,
+ .queue_pair_start = null_crypto_pmd_qp_start,
+ .queue_pair_stop = null_crypto_pmd_qp_stop,
+ .queue_pair_count = null_crypto_pmd_qp_count,
+
+ .session_get_size = null_crypto_pmd_session_get_size,
+ .session_configure = null_crypto_pmd_session_configure,
+ .session_clear = null_crypto_pmd_session_clear
+};
+
+struct rte_cryptodev_ops *null_crypto_pmd_ops = &pmd_ops;
diff --git a/src/seastar/dpdk/drivers/crypto/null/null_crypto_pmd_private.h b/src/seastar/dpdk/drivers/crypto/null/null_crypto_pmd_private.h
new file mode 100644
index 00000000..acebc973
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/null/null_crypto_pmd_private.h
@@ -0,0 +1,93 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _NULL_CRYPTO_PMD_PRIVATE_H_
+#define _NULL_CRYPTO_PMD_PRIVATE_H_
+
+#include "rte_config.h"
+
+#define NULL_CRYPTO_LOG_ERR(fmt, args...) \
+ RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_NULL_PMD), \
+ __func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_NULL_CRYPTO_DEBUG
+#define NULL_CRYPTO_LOG_INFO(fmt, args...) \
+ RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_NULL_PMD), \
+ __func__, __LINE__, ## args)
+
+#define NULL_CRYPTO_LOG_DBG(fmt, args...) \
+ RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_NULL_PMD), \
+ __func__, __LINE__, ## args)
+#else
+#define NULL_CRYPTO_LOG_INFO(fmt, args...)
+#define NULL_CRYPTO_LOG_DBG(fmt, args...)
+#endif
+
+
+/** private data structure for each NULL crypto device */
+struct null_crypto_private {
+ unsigned max_nb_qpairs; /**< Max number of queue pairs */
+ unsigned max_nb_sessions; /**< Max number of sessions */
+};
+
+/** NULL crypto queue pair */
+struct null_crypto_qp {
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_CRYPTODEV_NAME_LEN];
+ /**< Unique Queue Pair Name */
+ struct rte_ring *processed_pkts;
+ /**< Ring for placing processed packets */
+ struct rte_mempool *sess_mp;
+ /**< Session Mempool */
+ struct rte_cryptodev_stats qp_stats;
+ /**< Queue pair statistics */
+} __rte_cache_aligned;
+
+
+/** NULL crypto private session structure */
+struct null_crypto_session {
+ uint32_t reserved;
+} __rte_cache_aligned;
+
+/** Set and validate NULL crypto session parameters */
+extern int
+null_crypto_set_session_parameters(struct null_crypto_session *sess,
+ const struct rte_crypto_sym_xform *xform);
+
+/** device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *null_crypto_pmd_ops;
+
+#endif /* _NULL_CRYPTO_PMD_PRIVATE_H_ */
diff --git a/src/seastar/dpdk/drivers/crypto/null/rte_pmd_null_crypto_version.map b/src/seastar/dpdk/drivers/crypto/null/rte_pmd_null_crypto_version.map
new file mode 100644
index 00000000..dc4d417b
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/null/rte_pmd_null_crypto_version.map
@@ -0,0 +1,3 @@
+DPDK_16.04 {
+ local: *;
+};
diff --git a/src/seastar/dpdk/drivers/crypto/openssl/Makefile b/src/seastar/dpdk/drivers/crypto/openssl/Makefile
new file mode 100644
index 00000000..e5fdfb59
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/openssl/Makefile
@@ -0,0 +1,53 @@
+# BSD LICENSE
+#
+# Copyright(c) 2016 Intel Corporation. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_openssl.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_openssl_version.map
+
+# external library dependencies
+LDLIBS += -lcrypto
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += rte_openssl_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += rte_openssl_pmd_ops.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/seastar/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c b/src/seastar/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c
new file mode 100644
index 00000000..f0c5ca3c
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/openssl/rte_openssl_pmd.c
@@ -0,0 +1,1374 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_vdev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+
+#include <openssl/evp.h>
+
+#include "rte_openssl_pmd_private.h"
+
+#define DES_BLOCK_SIZE 8
+
+static int cryptodev_openssl_remove(struct rte_vdev_device *vdev);
+
+/*----------------------------------------------------------------------------*/
+
+/**
+ * Increment the counter by 1.
+ * The counter is a 64-bit big-endian value.
+ */
+static void
+ctr_inc(uint8_t *ctr)
+{
+ uint64_t *ctr64 = (uint64_t *)ctr;
+
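+ /* Note: this relies on the counter buffer being suitably aligned for
+ * a 64-bit access; swap to host order, increment and swap back.
+ */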
+ *ctr64 = __builtin_bswap64(*ctr64);
+ (*ctr64)++;
+ *ctr64 = __builtin_bswap64(*ctr64);
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Session Prepare
+ *------------------------------------------------------------------------------
+ */
+
+/** Get xform chain order */
+static enum openssl_chain_order
+openssl_get_chain_order(const struct rte_crypto_sym_xform *xform)
+{
+ enum openssl_chain_order res = OPENSSL_CHAIN_NOT_SUPPORTED;
+
+ if (xform != NULL) {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (xform->next == NULL)
+ res = OPENSSL_CHAIN_ONLY_AUTH;
+ else if (xform->next->type ==
+ RTE_CRYPTO_SYM_XFORM_CIPHER)
+ res = OPENSSL_CHAIN_AUTH_CIPHER;
+ }
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ if (xform->next == NULL)
+ res = OPENSSL_CHAIN_ONLY_CIPHER;
+ else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ res = OPENSSL_CHAIN_CIPHER_AUTH;
+ }
+ }
+
+ return res;
+}
+
+/** Get session cipher key from input cipher key */
+static void
+get_cipher_key(uint8_t *input_key, int keylen, uint8_t *session_key)
+{
+ memcpy(session_key, input_key, keylen);
+}
+
+/** Expand the input key into the standard 24-byte 3DES EDE key format */
+static int
+get_cipher_key_ede(uint8_t *key, int keylen, uint8_t *key_ede)
+{
+ int res = 0;
+
+ /* Initialize keys - 24 bytes: [key1-key2-key3] */
+ switch (keylen) {
+ case 24:
+ memcpy(key_ede, key, 24);
+ break;
+ case 16:
+ /* K3 = K1 */
+ memcpy(key_ede, key, 16);
+ memcpy(key_ede + 16, key, 8);
+ break;
+ case 8:
+ /* K1 = K2 = K3 (DES compatibility) */
+ memcpy(key_ede, key, 8);
+ memcpy(key_ede + 8, key, 8);
+ memcpy(key_ede + 16, key, 8);
+ break;
+ default:
+ OPENSSL_LOG_ERR("Unsupported key size");
+ res = -EINVAL;
+ }
+
+ return res;
+}
+
+/** Get the appropriate OpenSSL EVP cipher for the input cipher algorithm */
+static int
+get_cipher_algo(enum rte_crypto_cipher_algorithm sess_algo, size_t keylen,
+ const EVP_CIPHER **algo)
+{
+ int res = 0;
+
+ if (algo != NULL) {
+ switch (sess_algo) {
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ switch (keylen) {
+ case 16:
+ *algo = EVP_des_ede_cbc();
+ break;
+ case 24:
+ *algo = EVP_des_ede3_cbc();
+ break;
+ default:
+ res = -EINVAL;
+ }
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CTR:
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ switch (keylen) {
+ case 16:
+ *algo = EVP_aes_128_cbc();
+ break;
+ case 24:
+ *algo = EVP_aes_192_cbc();
+ break;
+ case 32:
+ *algo = EVP_aes_256_cbc();
+ break;
+ default:
+ res = -EINVAL;
+ }
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ switch (keylen) {
+ case 16:
+ *algo = EVP_aes_128_ctr();
+ break;
+ case 24:
+ *algo = EVP_aes_192_ctr();
+ break;
+ case 32:
+ *algo = EVP_aes_256_ctr();
+ break;
+ default:
+ res = -EINVAL;
+ }
+ break;
+ case RTE_CRYPTO_CIPHER_AES_GCM:
+ switch (keylen) {
+ case 16:
+ *algo = EVP_aes_128_gcm();
+ break;
+ case 24:
+ *algo = EVP_aes_192_gcm();
+ break;
+ case 32:
+ *algo = EVP_aes_256_gcm();
+ break;
+ default:
+ res = -EINVAL;
+ }
+ break;
+ default:
+ res = -EINVAL;
+ break;
+ }
+ } else {
+ res = -EINVAL;
+ }
+
+ return res;
+}
+
+/** Get the appropriate OpenSSL EVP digest for the input auth algorithm */
+static int
+get_auth_algo(enum rte_crypto_auth_algorithm sessalgo,
+ const EVP_MD **algo)
+{
+ int res = 0;
+
+ if (algo != NULL) {
+ switch (sessalgo) {
+ case RTE_CRYPTO_AUTH_MD5:
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ *algo = EVP_md5();
+ break;
+ case RTE_CRYPTO_AUTH_SHA1:
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ *algo = EVP_sha1();
+ break;
+ case RTE_CRYPTO_AUTH_SHA224:
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ *algo = EVP_sha224();
+ break;
+ case RTE_CRYPTO_AUTH_SHA256:
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ *algo = EVP_sha256();
+ break;
+ case RTE_CRYPTO_AUTH_SHA384:
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ *algo = EVP_sha384();
+ break;
+ case RTE_CRYPTO_AUTH_SHA512:
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ *algo = EVP_sha512();
+ break;
+ default:
+ res = -EINVAL;
+ break;
+ }
+ } else {
+ res = -EINVAL;
+ }
+
+ return res;
+}
+
+/** Set session cipher parameters */
+static int
+openssl_set_session_cipher_parameters(struct openssl_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ /* Select cipher direction */
+ sess->cipher.direction = xform->cipher.op;
+ /* Select cipher key */
+ sess->cipher.key.length = xform->cipher.key.length;
+
+ /* Select cipher algo */
+ switch (xform->cipher.algo) {
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ case RTE_CRYPTO_CIPHER_AES_GCM:
+ sess->cipher.mode = OPENSSL_CIPHER_LIB;
+ sess->cipher.algo = xform->cipher.algo;
+ sess->cipher.ctx = EVP_CIPHER_CTX_new();
+
+ if (get_cipher_algo(sess->cipher.algo, sess->cipher.key.length,
+ &sess->cipher.evp_algo) != 0)
+ return -EINVAL;
+
+ get_cipher_key(xform->cipher.key.data, sess->cipher.key.length,
+ sess->cipher.key.data);
+
+ break;
+
+ case RTE_CRYPTO_CIPHER_3DES_CTR:
+ sess->cipher.mode = OPENSSL_CIPHER_DES3CTR;
+ sess->cipher.ctx = EVP_CIPHER_CTX_new();
+
+ if (get_cipher_key_ede(xform->cipher.key.data,
+ sess->cipher.key.length,
+ sess->cipher.key.data) != 0)
+ return -EINVAL;
+ break;
+ case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
+ sess->cipher.algo = xform->cipher.algo;
+ sess->chain_order = OPENSSL_CHAIN_CIPHER_BPI;
+ sess->cipher.ctx = EVP_CIPHER_CTX_new();
+ sess->cipher.evp_algo = EVP_des_cbc();
+
+ sess->cipher.bpi_ctx = EVP_CIPHER_CTX_new();
+ /* IV will be ECB encrypted whether direction is encrypt or decrypt */
+ if (EVP_EncryptInit_ex(sess->cipher.bpi_ctx, EVP_des_ecb(),
+ NULL, xform->cipher.key.data, 0) != 1)
+ return -EINVAL;
+
+ get_cipher_key(xform->cipher.key.data, sess->cipher.key.length,
+ sess->cipher.key.data);
+ break;
+ default:
+ sess->cipher.algo = RTE_CRYPTO_CIPHER_NULL;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Set session auth parameters */
+static int
+openssl_set_session_auth_parameters(struct openssl_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ /* Select auth generate/verify */
+ sess->auth.operation = xform->auth.op;
+ sess->auth.algo = xform->auth.algo;
+
+ /* Select auth algo */
+ switch (xform->auth.algo) {
+ case RTE_CRYPTO_AUTH_AES_GMAC:
+ case RTE_CRYPTO_AUTH_AES_GCM:
+ /* AES GMAC/GCM auth requires the cipher to be configured as AES-GCM */
+ if (sess->cipher.algo != RTE_CRYPTO_CIPHER_AES_GCM)
+ return -EINVAL;
+ sess->chain_order = OPENSSL_CHAIN_COMBINED;
+ break;
+
+ case RTE_CRYPTO_AUTH_MD5:
+ case RTE_CRYPTO_AUTH_SHA1:
+ case RTE_CRYPTO_AUTH_SHA224:
+ case RTE_CRYPTO_AUTH_SHA256:
+ case RTE_CRYPTO_AUTH_SHA384:
+ case RTE_CRYPTO_AUTH_SHA512:
+ sess->auth.mode = OPENSSL_AUTH_AS_AUTH;
+ if (get_auth_algo(xform->auth.algo,
+ &sess->auth.auth.evp_algo) != 0)
+ return -EINVAL;
+ sess->auth.auth.ctx = EVP_MD_CTX_create();
+ break;
+
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ sess->auth.mode = OPENSSL_AUTH_AS_HMAC;
+ sess->auth.hmac.ctx = EVP_MD_CTX_create();
+ if (get_auth_algo(xform->auth.algo,
+ &sess->auth.hmac.evp_algo) != 0)
+ return -EINVAL;
+ sess->auth.hmac.pkey = EVP_PKEY_new_mac_key(EVP_PKEY_HMAC, NULL,
+ xform->auth.key.data, xform->auth.key.length);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/** Parse crypto xform chain and set private session parameters */
+int
+openssl_set_session_parameters(struct openssl_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+
+ sess->chain_order = openssl_get_chain_order(xform);
+ switch (sess->chain_order) {
+ case OPENSSL_CHAIN_ONLY_CIPHER:
+ cipher_xform = xform;
+ break;
+ case OPENSSL_CHAIN_ONLY_AUTH:
+ auth_xform = xform;
+ break;
+ case OPENSSL_CHAIN_CIPHER_AUTH:
+ cipher_xform = xform;
+ auth_xform = xform->next;
+ break;
+ case OPENSSL_CHAIN_AUTH_CIPHER:
+ auth_xform = xform;
+ cipher_xform = xform->next;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* cipher_xform must be checked before auth_xform */
+ if (cipher_xform) {
+ if (openssl_set_session_cipher_parameters(
+ sess, cipher_xform)) {
+ OPENSSL_LOG_ERR(
+ "Invalid/unsupported cipher parameters");
+ return -EINVAL;
+ }
+ }
+
+ if (auth_xform) {
+ if (openssl_set_session_auth_parameters(sess, auth_xform)) {
+ OPENSSL_LOG_ERR(
+ "Invalid/unsupported auth parameters");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/** Reset private session parameters */
+void
+openssl_reset_session(struct openssl_session *sess)
+{
+ EVP_CIPHER_CTX_free(sess->cipher.ctx);
+
+ if (sess->chain_order == OPENSSL_CHAIN_CIPHER_BPI)
+ EVP_CIPHER_CTX_free(sess->cipher.bpi_ctx);
+
+ switch (sess->auth.mode) {
+ case OPENSSL_AUTH_AS_AUTH:
+ EVP_MD_CTX_destroy(sess->auth.auth.ctx);
+ break;
+ case OPENSSL_AUTH_AS_HMAC:
+ EVP_PKEY_free(sess->auth.hmac.pkey);
+ EVP_MD_CTX_destroy(sess->auth.hmac.ctx);
+ break;
+ default:
+ break;
+ }
+}
+
+/** Provide session for operation */
+static struct openssl_session *
+get_session(struct openssl_qp *qp, struct rte_crypto_op *op)
+{
+ struct openssl_session *sess = NULL;
+
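+ /* Ops with an attached session reuse its pre-configured private data;
+ * session-less ops get a temporary session from the queue pair's mempool,
+ * configured from the op's xform chain and released after processing.
+ */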
+ if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
+ /* get existing session */
+ if (likely(op->sym->session != NULL &&
+ op->sym->session->dev_type ==
+ RTE_CRYPTODEV_OPENSSL_PMD))
+ sess = (struct openssl_session *)
+ op->sym->session->_private;
+ } else {
+ /* provide internal session */
+ void *_sess = NULL;
+
+ if (!rte_mempool_get(qp->sess_mp, (void **)&_sess)) {
+ sess = (struct openssl_session *)
+ ((struct rte_cryptodev_sym_session *)_sess)
+ ->_private;
+
+ if (unlikely(openssl_set_session_parameters(
+ sess, op->sym->xform) != 0)) {
+ rte_mempool_put(qp->sess_mp, _sess);
+ sess = NULL;
+ } else
+ op->sym->session = _sess;
+ }
+ }
+
+ if (sess == NULL)
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+
+ return sess;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Process Operations
+ *------------------------------------------------------------------------------
+ */
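+
+/** Walk a possibly segmented source mbuf chain from 'offset' and feed
+ * 'srclen' bytes through EVP_EncryptUpdate() into the contiguous 'dst'
+ * buffer, advancing *dst as output is written.
+ */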
+static inline int
+process_openssl_encryption_update(struct rte_mbuf *mbuf_src, int offset,
+ uint8_t **dst, int srclen, EVP_CIPHER_CTX *ctx)
+{
+ struct rte_mbuf *m;
+ int dstlen;
+ int l, n = srclen;
+ uint8_t *src;
+
+ for (m = mbuf_src; m != NULL && offset > rte_pktmbuf_data_len(m);
+ m = m->next)
+ offset -= rte_pktmbuf_data_len(m);
+
+ if (m == 0)
+ return -1;
+
+ src = rte_pktmbuf_mtod_offset(m, uint8_t *, offset);
+
+ l = rte_pktmbuf_data_len(m) - offset;
+ if (srclen <= l) {
+ if (EVP_EncryptUpdate(ctx, *dst, &dstlen, src, srclen) <= 0)
+ return -1;
+ *dst += l;
+ return 0;
+ }
+
+ if (EVP_EncryptUpdate(ctx, *dst, &dstlen, src, l) <= 0)
+ return -1;
+
+ *dst += dstlen;
+ n -= l;
+
+ for (m = m->next; (m != NULL) && (n > 0); m = m->next) {
+ src = rte_pktmbuf_mtod(m, uint8_t *);
+ l = rte_pktmbuf_data_len(m) < n ? rte_pktmbuf_data_len(m) : n;
+ if (EVP_EncryptUpdate(ctx, *dst, &dstlen, src, l) <= 0)
+ return -1;
+ *dst += dstlen;
+ n -= l;
+ }
+
+ return 0;
+}
+
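+/** Segmented-mbuf variant of EVP_DecryptUpdate(); mirrors the encryption
+ * update helper above.
+ */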
+static inline int
+process_openssl_decryption_update(struct rte_mbuf *mbuf_src, int offset,
+ uint8_t **dst, int srclen, EVP_CIPHER_CTX *ctx)
+{
+ struct rte_mbuf *m;
+ int dstlen;
+ int l, n = srclen;
+ uint8_t *src;
+
+ for (m = mbuf_src; m != NULL && offset > rte_pktmbuf_data_len(m);
+ m = m->next)
+ offset -= rte_pktmbuf_data_len(m);
+
+ if (m == 0)
+ return -1;
+
+ src = rte_pktmbuf_mtod_offset(m, uint8_t *, offset);
+
+ l = rte_pktmbuf_data_len(m) - offset;
+ if (srclen <= l) {
+ if (EVP_DecryptUpdate(ctx, *dst, &dstlen, src, srclen) <= 0)
+ return -1;
+ *dst += l;
+ return 0;
+ }
+
+ if (EVP_DecryptUpdate(ctx, *dst, &dstlen, src, l) <= 0)
+ return -1;
+
+ *dst += dstlen;
+ n -= l;
+
+ for (m = m->next; (m != NULL) && (n > 0); m = m->next) {
+ src = rte_pktmbuf_mtod(m, uint8_t *);
+ l = rte_pktmbuf_data_len(m) < n ? rte_pktmbuf_data_len(m) : n;
+ if (EVP_DecryptUpdate(ctx, *dst, &dstlen, src, l) <= 0)
+ return -1;
+ *dst += dstlen;
+ n -= l;
+ }
+
+ return 0;
+}
+
+/** Process standard openssl cipher encryption */
+static int
+process_openssl_cipher_encrypt(struct rte_mbuf *mbuf_src, uint8_t *dst,
+ int offset, uint8_t *iv, uint8_t *key, int srclen,
+ EVP_CIPHER_CTX *ctx, const EVP_CIPHER *algo)
+{
+ int totlen;
+
+ if (EVP_EncryptInit_ex(ctx, algo, NULL, key, iv) <= 0)
+ goto process_cipher_encrypt_err;
+
+ EVP_CIPHER_CTX_set_padding(ctx, 0);
+
+ if (process_openssl_encryption_update(mbuf_src, offset, &dst,
+ srclen, ctx))
+ goto process_cipher_encrypt_err;
+
+ if (EVP_EncryptFinal_ex(ctx, dst, &totlen) <= 0)
+ goto process_cipher_encrypt_err;
+
+ return 0;
+
+process_cipher_encrypt_err:
+ OPENSSL_LOG_ERR("Process openssl cipher encrypt failed");
+ return -EINVAL;
+}
+
+/** Process DOCSIS BPI residual block cipher encryption */
+static int
+process_openssl_cipher_bpi_encrypt(uint8_t *src, uint8_t *dst,
+ uint8_t *iv, int srclen,
+ EVP_CIPHER_CTX *ctx)
+{
+ uint8_t i;
+ uint8_t encrypted_iv[DES_BLOCK_SIZE];
+ int encrypted_ivlen;
+
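+ /* DOCSIS residual block termination: ECB-encrypt the IV once and XOR
+ * the result with the (shorter than one block) source data.
+ */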
+ if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen,
+ iv, DES_BLOCK_SIZE) <= 0)
+ goto process_cipher_encrypt_err;
+
+ for (i = 0; i < srclen; i++)
+ *(dst + i) = *(src + i) ^ (encrypted_iv[i]);
+
+ return 0;
+
+process_cipher_encrypt_err:
+ OPENSSL_LOG_ERR("Process openssl cipher bpi encrypt failed");
+ return -EINVAL;
+}
+
+/** Process standard openssl cipher decryption */
+static int
+process_openssl_cipher_decrypt(struct rte_mbuf *mbuf_src, uint8_t *dst,
+ int offset, uint8_t *iv, uint8_t *key, int srclen,
+ EVP_CIPHER_CTX *ctx, const EVP_CIPHER *algo)
+{
+ int totlen;
+
+ if (EVP_DecryptInit_ex(ctx, algo, NULL, key, iv) <= 0)
+ goto process_cipher_decrypt_err;
+
+ EVP_CIPHER_CTX_set_padding(ctx, 0);
+
+ if (process_openssl_decryption_update(mbuf_src, offset, &dst,
+ srclen, ctx))
+ goto process_cipher_decrypt_err;
+
+ if (EVP_DecryptFinal_ex(ctx, dst, &totlen) <= 0)
+ goto process_cipher_decrypt_err;
+ return 0;
+
+process_cipher_decrypt_err:
+ OPENSSL_LOG_ERR("Process openssl cipher decrypt failed");
+ return -EINVAL;
+}
+
+/** Process 3DES CTR encryption/decryption (same operation in both directions) */
+static int
+process_openssl_cipher_des3ctr(struct rte_mbuf *mbuf_src, uint8_t *dst,
+ int offset, uint8_t *iv, uint8_t *key, int srclen,
+ EVP_CIPHER_CTX *ctx)
+{
+ uint8_t ebuf[8], ctr[8];
+ int unused, n;
+ struct rte_mbuf *m;
+ uint8_t *src;
+ int l;
+
+ for (m = mbuf_src; m != NULL && offset > rte_pktmbuf_data_len(m);
+ m = m->next)
+ offset -= rte_pktmbuf_data_len(m);
+
+ if (m == 0)
+ goto process_cipher_des3ctr_err;
+
+ src = rte_pktmbuf_mtod_offset(m, uint8_t *, offset);
+ l = rte_pktmbuf_data_len(m) - offset;
+
+ /* 3DES encryption is used for both directions;
+ * the IV is irrelevant for 3DES ECB.
+ */
+ if (EVP_EncryptInit_ex(ctx, EVP_des_ede3_ecb(), NULL, key, NULL) <= 0)
+ goto process_cipher_des3ctr_err;
+
+ memcpy(ctr, iv, 8);
+
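+ /* CTR construction: every 8 bytes, ECB-encrypt the counter to produce a
+ * fresh keystream block, XOR it into the output and bump the counter.
+ */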
+ for (n = 0; n < srclen; n++) {
+ if (n % 8 == 0) {
+ if (EVP_EncryptUpdate(ctx,
+ (unsigned char *)&ebuf, &unused,
+ (const unsigned char *)&ctr, 8) <= 0)
+ goto process_cipher_des3ctr_err;
+ ctr_inc(ctr);
+ }
+ dst[n] = *(src++) ^ ebuf[n % 8];
+
+ l--;
+ if (!l) {
+ m = m->next;
+ if (m) {
+ src = rte_pktmbuf_mtod(m, uint8_t *);
+ l = rte_pktmbuf_data_len(m);
+ }
+ }
+ }
+
+ return 0;
+
+process_cipher_des3ctr_err:
+ OPENSSL_LOG_ERR("Process openssl cipher des 3 ede ctr failed");
+ return -EINVAL;
+}
+
+/** Process AES-GCM authenticated encryption */
+static int
+process_openssl_auth_encryption_gcm(struct rte_mbuf *mbuf_src, int offset,
+ int srclen, uint8_t *aad, int aadlen, uint8_t *iv, int ivlen,
+ uint8_t *key, uint8_t *dst, uint8_t *tag,
+ EVP_CIPHER_CTX *ctx, const EVP_CIPHER *algo)
+{
+ int len = 0, unused = 0;
+ uint8_t empty[] = {};
+
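+ /* GCM call sequence: init the algorithm, set the IV length, set key/IV,
+ * feed the AAD with a NULL output buffer, process the payload, then
+ * finalize and fetch the 16-byte tag.
+ */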
+ if (EVP_EncryptInit_ex(ctx, algo, NULL, NULL, NULL) <= 0)
+ goto process_auth_encryption_gcm_err;
+
+ if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_IVLEN, ivlen, NULL) <= 0)
+ goto process_auth_encryption_gcm_err;
+
+ if (EVP_EncryptInit_ex(ctx, NULL, NULL, key, iv) <= 0)
+ goto process_auth_encryption_gcm_err;
+
+ if (aadlen > 0)
+ if (EVP_EncryptUpdate(ctx, NULL, &len, aad, aadlen) <= 0)
+ goto process_auth_encryption_gcm_err;
+
+ if (srclen > 0)
+ if (process_openssl_encryption_update(mbuf_src, offset, &dst,
+ srclen, ctx))
+ goto process_auth_encryption_gcm_err;
+
+ /* Workaround for OpenSSL bug in versions earlier than 1.0.1f */
+ if (EVP_EncryptUpdate(ctx, empty, &unused, empty, 0) <= 0)
+ goto process_auth_encryption_gcm_err;
+
+ if (EVP_EncryptFinal_ex(ctx, dst, &len) <= 0)
+ goto process_auth_encryption_gcm_err;
+
+ if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_GET_TAG, 16, tag) <= 0)
+ goto process_auth_encryption_gcm_err;
+
+ return 0;
+
+process_auth_encryption_gcm_err:
+ OPENSSL_LOG_ERR("Process openssl auth encryption gcm failed");
+ return -EINVAL;
+}
+
+static int
+process_openssl_auth_decryption_gcm(struct rte_mbuf *mbuf_src, int offset,
+ int srclen, uint8_t *aad, int aadlen, uint8_t *iv, int ivlen,
+ uint8_t *key, uint8_t *dst, uint8_t *tag, EVP_CIPHER_CTX *ctx,
+ const EVP_CIPHER *algo)
+{
+ int len = 0, unused = 0;
+ uint8_t empty[] = {};
+
+ if (EVP_DecryptInit_ex(ctx, algo, NULL, NULL, NULL) <= 0)
+ goto process_auth_decryption_gcm_err;
+
+ if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_IVLEN, ivlen, NULL) <= 0)
+ goto process_auth_decryption_gcm_err;
+
+ if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_TAG, 16, tag) <= 0)
+ goto process_auth_decryption_gcm_err;
+
+ if (EVP_DecryptInit_ex(ctx, NULL, NULL, key, iv) <= 0)
+ goto process_auth_decryption_gcm_err;
+
+ if (aadlen > 0)
+ if (EVP_DecryptUpdate(ctx, NULL, &len, aad, aadlen) <= 0)
+ goto process_auth_decryption_gcm_err;
+
+ if (srclen > 0)
+ if (process_openssl_decryption_update(mbuf_src, offset, &dst,
+ srclen, ctx))
+ goto process_auth_decryption_gcm_err;
+
+ /* Workaround for OpenSSL bug in versions earlier than 1.0.1f */
+ if (EVP_DecryptUpdate(ctx, empty, &unused, empty, 0) <= 0)
+ goto process_auth_decryption_gcm_err;
+
+ if (EVP_DecryptFinal_ex(ctx, dst, &len) <= 0)
+ goto process_auth_decryption_gcm_final_err;
+
+ return 0;
+
+process_auth_decryption_gcm_err:
+ OPENSSL_LOG_ERR("Process openssl auth description gcm failed");
+ return -EINVAL;
+
+process_auth_decryption_gcm_final_err:
+ return -EFAULT;
+}
+
+/** Process standard openssl auth algorithms */
+static int
+process_openssl_auth(struct rte_mbuf *mbuf_src, uint8_t *dst, int offset,
+ __rte_unused uint8_t *iv, __rte_unused EVP_PKEY * pkey,
+ int srclen, EVP_MD_CTX *ctx, const EVP_MD *algo)
+{
+ size_t dstlen;
+ struct rte_mbuf *m;
+ int l, n = srclen;
+ uint8_t *src;
+
+ for (m = mbuf_src; m != NULL && offset > rte_pktmbuf_data_len(m);
+ m = m->next)
+ offset -= rte_pktmbuf_data_len(m);
+
+ if (m == 0)
+ goto process_auth_err;
+
+ if (EVP_DigestInit_ex(ctx, algo, NULL) <= 0)
+ goto process_auth_err;
+
+ src = rte_pktmbuf_mtod_offset(m, uint8_t *, offset);
+
+ l = rte_pktmbuf_data_len(m) - offset;
+ if (srclen <= l) {
+ if (EVP_DigestUpdate(ctx, (char *)src, srclen) <= 0)
+ goto process_auth_err;
+ goto process_auth_final;
+ }
+
+ if (EVP_DigestUpdate(ctx, (char *)src, l) <= 0)
+ goto process_auth_err;
+
+ n -= l;
+
+ for (m = m->next; (m != NULL) && (n > 0); m = m->next) {
+ src = rte_pktmbuf_mtod(m, uint8_t *);
+ l = rte_pktmbuf_data_len(m) < n ? rte_pktmbuf_data_len(m) : n;
+ if (EVP_DigestUpdate(ctx, (char *)src, l) <= 0)
+ goto process_auth_err;
+ n -= l;
+ }
+
+process_auth_final:
+ if (EVP_DigestFinal_ex(ctx, dst, (unsigned int *)&dstlen) <= 0)
+ goto process_auth_err;
+ return 0;
+
+process_auth_err:
+ OPENSSL_LOG_ERR("Process openssl auth failed");
+ return -EINVAL;
+}
+
+/** Process standard openssl auth algorithms with hmac */
+static int
+process_openssl_auth_hmac(struct rte_mbuf *mbuf_src, uint8_t *dst, int offset,
+ __rte_unused uint8_t *iv, EVP_PKEY *pkey,
+ int srclen, EVP_MD_CTX *ctx, const EVP_MD *algo)
+{
+ size_t dstlen;
+ struct rte_mbuf *m;
+ int l, n = srclen;
+ uint8_t *src;
+
+ for (m = mbuf_src; m != NULL && offset > rte_pktmbuf_data_len(m);
+ m = m->next)
+ offset -= rte_pktmbuf_data_len(m);
+
+ if (m == 0)
+ goto process_auth_err;
+
+ if (EVP_DigestSignInit(ctx, NULL, algo, NULL, pkey) <= 0)
+ goto process_auth_err;
+
+ src = rte_pktmbuf_mtod_offset(m, uint8_t *, offset);
+
+ l = rte_pktmbuf_data_len(m) - offset;
+ if (srclen <= l) {
+ if (EVP_DigestSignUpdate(ctx, (char *)src, srclen) <= 0)
+ goto process_auth_err;
+ goto process_auth_final;
+ }
+
+ if (EVP_DigestSignUpdate(ctx, (char *)src, l) <= 0)
+ goto process_auth_err;
+
+ n -= l;
+
+ for (m = m->next; (m != NULL) && (n > 0); m = m->next) {
+ src = rte_pktmbuf_mtod(m, uint8_t *);
+ l = rte_pktmbuf_data_len(m) < n ? rte_pktmbuf_data_len(m) : n;
+ if (EVP_DigestSignUpdate(ctx, (char *)src, l) <= 0)
+ goto process_auth_err;
+ n -= l;
+ }
+
+process_auth_final:
+ if (EVP_DigestSignFinal(ctx, dst, &dstlen) <= 0)
+ goto process_auth_err;
+
+ return 0;
+
+process_auth_err:
+ OPENSSL_LOG_ERR("Process openssl auth failed");
+ return -EINVAL;
+}
+
+/*----------------------------------------------------------------------------*/
+
+/** Process auth/cipher combined operation */
+static void
+process_openssl_combined_op
+ (struct rte_crypto_op *op, struct openssl_session *sess,
+ struct rte_mbuf *mbuf_src, struct rte_mbuf *mbuf_dst)
+{
+ /* cipher */
+ uint8_t *dst = NULL, *iv, *tag, *aad;
+ int srclen, ivlen, aadlen, status = -1;
+
+ /*
+ * Segmented destination buffer is not supported for
+ * encryption/decryption
+ */
+ if (!rte_pktmbuf_is_contiguous(mbuf_dst)) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return;
+ }
+
+ iv = op->sym->cipher.iv.data;
+ ivlen = op->sym->cipher.iv.length;
+ aad = op->sym->auth.aad.data;
+ aadlen = op->sym->auth.aad.length;
+
+ tag = op->sym->auth.digest.data;
+ if (tag == NULL)
+ tag = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ op->sym->cipher.data.offset +
+ op->sym->cipher.data.length);
+
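+ /* AES-GMAC is handled as GCM with an empty payload: all input is
+ * authenticated as AAD and only the tag is produced or verified.
+ */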
+ if (sess->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC)
+ srclen = 0;
+ else {
+ srclen = op->sym->cipher.data.length;
+ dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ op->sym->cipher.data.offset);
+ }
+
+ if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ status = process_openssl_auth_encryption_gcm(
+ mbuf_src, op->sym->cipher.data.offset, srclen,
+ aad, aadlen, iv, ivlen, sess->cipher.key.data,
+ dst, tag, sess->cipher.ctx,
+ sess->cipher.evp_algo);
+ else
+ status = process_openssl_auth_decryption_gcm(
+ mbuf_src, op->sym->cipher.data.offset, srclen,
+ aad, aadlen, iv, ivlen, sess->cipher.key.data,
+ dst, tag, sess->cipher.ctx,
+ sess->cipher.evp_algo);
+
+ if (status != 0) {
+ if (status == (-EFAULT) &&
+ sess->auth.operation ==
+ RTE_CRYPTO_AUTH_OP_VERIFY)
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ else
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ }
+}
+
+/** Process cipher operation */
+static void
+process_openssl_cipher_op
+ (struct rte_crypto_op *op, struct openssl_session *sess,
+ struct rte_mbuf *mbuf_src, struct rte_mbuf *mbuf_dst)
+{
+ uint8_t *dst, *iv;
+ int srclen, status;
+
+ /*
+ * Segmented destination buffer is not supported for
+ * encryption/decryption
+ */
+ if (!rte_pktmbuf_is_contiguous(mbuf_dst)) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return;
+ }
+
+ srclen = op->sym->cipher.data.length;
+ dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ op->sym->cipher.data.offset);
+
+ iv = op->sym->cipher.iv.data;
+
+ if (sess->cipher.mode == OPENSSL_CIPHER_LIB)
+ if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ status = process_openssl_cipher_encrypt(mbuf_src, dst,
+ op->sym->cipher.data.offset, iv,
+ sess->cipher.key.data, srclen,
+ sess->cipher.ctx,
+ sess->cipher.evp_algo);
+ else
+ status = process_openssl_cipher_decrypt(mbuf_src, dst,
+ op->sym->cipher.data.offset, iv,
+ sess->cipher.key.data, srclen,
+ sess->cipher.ctx,
+ sess->cipher.evp_algo);
+ else
+ status = process_openssl_cipher_des3ctr(mbuf_src, dst,
+ op->sym->cipher.data.offset, iv,
+ sess->cipher.key.data, srclen,
+ sess->cipher.ctx);
+
+ if (status != 0)
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+}
+
+/** Process DOCSIS BPI cipher operation */
+static void
+process_openssl_docsis_bpi_op(struct rte_crypto_op *op,
+ struct openssl_session *sess, struct rte_mbuf *mbuf_src,
+ struct rte_mbuf *mbuf_dst)
+{
+ uint8_t *src, *dst, *iv;
+ uint8_t block_size, last_block_len;
+ int srclen, status = 0;
+
+ srclen = op->sym->cipher.data.length;
+ src = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
+ op->sym->cipher.data.offset);
+ dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ op->sym->cipher.data.offset);
+
+ iv = op->sym->cipher.iv.data;
+
+ block_size = DES_BLOCK_SIZE;
+
+ last_block_len = srclen % block_size;
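+
+ /* DOCSIS BPI: full DES blocks go through CBC; a trailing partial block
+ * uses residual block termination (XOR with the ECB-encrypted IV).
+ */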
+ if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+ /* Less than one block: encrypt with the ECB-encrypted IV XOR only */
+ if (srclen < block_size) {
+ status = process_openssl_cipher_bpi_encrypt(src, dst,
+ iv, srclen,
+ sess->cipher.bpi_ctx);
+ } else {
+ srclen -= last_block_len;
+ /* Encrypt the block-aligned part of the stream with CBC mode */
+ status = process_openssl_cipher_encrypt(mbuf_src, dst,
+ op->sym->cipher.data.offset, iv,
+ sess->cipher.key.data, srclen,
+ sess->cipher.ctx, sess->cipher.evp_algo);
+ if (last_block_len) {
+ /* Point at last block */
+ dst += srclen;
+ /*
+ * IV is the last encrypted block from
+ * the previous operation
+ */
+ iv = dst - block_size;
+ src += srclen;
+ srclen = last_block_len;
+ /* Encrypt the last frame with ECB mode */
+ status |= process_openssl_cipher_bpi_encrypt(src,
+ dst, iv,
+ srclen, sess->cipher.bpi_ctx);
+ }
+ }
+ } else {
+ /* Decrypt only with ECB mode (encrypt, as it is the same operation) */
+ if (srclen < block_size) {
+ status = process_openssl_cipher_bpi_encrypt(src, dst,
+ iv,
+ srclen,
+ sess->cipher.bpi_ctx);
+ } else {
+ if (last_block_len) {
+ /* Point at last block */
+ dst += srclen - last_block_len;
+ src += srclen - last_block_len;
+ /*
+ * IV is the last full block
+ */
+ iv = src - block_size;
+ /*
+ * Decrypt the last frame with ECB mode
+ * (encrypt, as it is the same operation)
+ */
+ status = process_openssl_cipher_bpi_encrypt(src,
+ dst, iv,
+ last_block_len, sess->cipher.bpi_ctx);
+ /* Prepare parameters for CBC mode op */
+ iv = op->sym->cipher.iv.data;
+ dst += last_block_len - srclen;
+ srclen -= last_block_len;
+ }
+
+ /* Decrypt with CBC mode */
+ status |= process_openssl_cipher_decrypt(mbuf_src, dst,
+ op->sym->cipher.data.offset, iv,
+ sess->cipher.key.data, srclen,
+ sess->cipher.ctx,
+ sess->cipher.evp_algo);
+ }
+ }
+
+ if (status != 0)
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+}
+
+/** Process auth operation */
+static void
+process_openssl_auth_op
+ (struct rte_crypto_op *op, struct openssl_session *sess,
+ struct rte_mbuf *mbuf_src, struct rte_mbuf *mbuf_dst)
+{
+ uint8_t *dst;
+ int srclen, status;
+
+ srclen = op->sym->auth.data.length;
+
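+ /* For VERIFY the digest is computed into scratch space appended to the
+ * source mbuf, compared against the expected digest and then trimmed off.
+ */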
+ if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY)
+ dst = (uint8_t *)rte_pktmbuf_append(mbuf_src,
+ op->sym->auth.digest.length);
+ else {
+ dst = op->sym->auth.digest.data;
+ if (dst == NULL)
+ dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ op->sym->auth.data.offset +
+ op->sym->auth.data.length);
+ }
+
+ switch (sess->auth.mode) {
+ case OPENSSL_AUTH_AS_AUTH:
+ status = process_openssl_auth(mbuf_src, dst,
+ op->sym->auth.data.offset, NULL, NULL, srclen,
+ sess->auth.auth.ctx, sess->auth.auth.evp_algo);
+ break;
+ case OPENSSL_AUTH_AS_HMAC:
+ status = process_openssl_auth_hmac(mbuf_src, dst,
+ op->sym->auth.data.offset, NULL,
+ sess->auth.hmac.pkey, srclen,
+ sess->auth.hmac.ctx, sess->auth.hmac.evp_algo);
+ break;
+ default:
+ status = -1;
+ break;
+ }
+
+ if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
+ if (memcmp(dst, op->sym->auth.digest.data,
+ op->sym->auth.digest.length) != 0) {
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ }
+ /* Trim area used for digest from mbuf. */
+ rte_pktmbuf_trim(mbuf_src, op->sym->auth.digest.length);
+ }
+
+ if (status != 0)
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+}
+
+/** Process crypto operation for mbuf */
+static int
+process_op(const struct openssl_qp *qp, struct rte_crypto_op *op,
+ struct openssl_session *sess)
+{
+ struct rte_mbuf *msrc, *mdst;
+ int retval;
+
+ msrc = op->sym->m_src;
+ mdst = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+ switch (sess->chain_order) {
+ case OPENSSL_CHAIN_ONLY_CIPHER:
+ process_openssl_cipher_op(op, sess, msrc, mdst);
+ break;
+ case OPENSSL_CHAIN_ONLY_AUTH:
+ process_openssl_auth_op(op, sess, msrc, mdst);
+ break;
+ case OPENSSL_CHAIN_CIPHER_AUTH:
+ process_openssl_cipher_op(op, sess, msrc, mdst);
+ process_openssl_auth_op(op, sess, mdst, mdst);
+ break;
+ case OPENSSL_CHAIN_AUTH_CIPHER:
+ process_openssl_auth_op(op, sess, msrc, mdst);
+ process_openssl_cipher_op(op, sess, msrc, mdst);
+ break;
+ case OPENSSL_CHAIN_COMBINED:
+ process_openssl_combined_op(op, sess, msrc, mdst);
+ break;
+ case OPENSSL_CHAIN_CIPHER_BPI:
+ process_openssl_docsis_bpi_op(op, sess, msrc, mdst);
+ break;
+ default:
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ break;
+ }
+
+ /* Free session if a session-less crypto op */
+ if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+ openssl_reset_session(sess);
+ memset(sess, 0, sizeof(struct openssl_session));
+ rte_mempool_put(qp->sess_mp, op->sym->session);
+ op->sym->session = NULL;
+ }
+
+ if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+ if (op->status != RTE_CRYPTO_OP_STATUS_ERROR)
+ retval = rte_ring_enqueue(qp->processed_ops, (void *)op);
+ else
+ retval = -1;
+
+ return retval;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * PMD Framework
+ *------------------------------------------------------------------------------
+ */
+
+/** Enqueue burst */
+static uint16_t
+openssl_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct openssl_session *sess;
+ struct openssl_qp *qp = queue_pair;
+ int i, retval;
+
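+ /* Crypto work is performed synchronously at enqueue time; completed ops
+ * are staged on the processed-ops ring and returned by dequeue_burst.
+ */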
+ for (i = 0; i < nb_ops; i++) {
+ sess = get_session(qp, ops[i]);
+ if (unlikely(sess == NULL))
+ goto enqueue_err;
+
+ retval = process_op(qp, ops[i], sess);
+ if (unlikely(retval < 0))
+ goto enqueue_err;
+ }
+
+ qp->stats.enqueued_count += i;
+ return i;
+
+enqueue_err:
+ qp->stats.enqueue_err_count++;
+ return i;
+}
+
+/** Dequeue burst */
+static uint16_t
+openssl_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct openssl_qp *qp = queue_pair;
+
+ unsigned int nb_dequeued = 0;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
+ (void **)ops, nb_ops, NULL);
+ qp->stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
+
+/** Create OPENSSL crypto device */
+static int
+cryptodev_openssl_create(const char *name,
+ struct rte_vdev_device *vdev,
+ struct rte_crypto_vdev_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ struct openssl_private *internals;
+
+ if (init_params->name[0] == '\0')
+ snprintf(init_params->name, sizeof(init_params->name),
+ "%s", name);
+
+ dev = rte_cryptodev_pmd_virtual_dev_init(init_params->name,
+ sizeof(struct openssl_private),
+ init_params->socket_id);
+ if (dev == NULL) {
+ OPENSSL_LOG_ERR("failed to create cryptodev vdev");
+ goto init_error;
+ }
+
+ dev->dev_type = RTE_CRYPTODEV_OPENSSL_PMD;
+ dev->dev_ops = rte_openssl_pmd_ops;
+
+ /* register rx/tx burst functions for data path */
+ dev->dequeue_burst = openssl_pmd_dequeue_burst;
+ dev->enqueue_burst = openssl_pmd_enqueue_burst;
+
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_CPU_AESNI |
+ RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
+
+ /* Set vector instructions mode supported */
+ internals = dev->data->dev_private;
+
+ internals->max_nb_qpairs = init_params->max_nb_queue_pairs;
+ internals->max_nb_sessions = init_params->max_nb_sessions;
+
+ return 0;
+
+init_error:
+ OPENSSL_LOG_ERR("driver %s: cryptodev_openssl_create failed",
+ init_params->name);
+
+ cryptodev_openssl_remove(vdev);
+ return -EFAULT;
+}
+
+/** Initialise OPENSSL crypto device */
+static int
+cryptodev_openssl_probe(struct rte_vdev_device *vdev)
+{
+ struct rte_crypto_vdev_init_params init_params = {
+ RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
+ RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+ rte_socket_id(),
+ {0}
+ };
+ const char *name;
+ const char *input_args;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+ input_args = rte_vdev_device_args(vdev);
+
+ rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
+
+ RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
+ init_params.socket_id);
+ if (init_params.name[0] != '\0')
+ RTE_LOG(INFO, PMD, " User defined name = %s\n",
+ init_params.name);
+ RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
+ init_params.max_nb_queue_pairs);
+ RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
+ init_params.max_nb_sessions);
+
+ return cryptodev_openssl_create(name, vdev, &init_params);
+}
+
+/** Uninitialise OPENSSL crypto device */
+static int
+cryptodev_openssl_remove(struct rte_vdev_device *vdev)
+{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ RTE_LOG(INFO, PMD,
+ "Closing OPENSSL crypto device %s on numa socket %u\n",
+ name, rte_socket_id());
+
+ return 0;
+}
+
+static struct rte_vdev_driver cryptodev_openssl_pmd_drv = {
+ .probe = cryptodev_openssl_probe,
+ .remove = cryptodev_openssl_remove
+};
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_OPENSSL_PMD,
+ cryptodev_openssl_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_OPENSSL_PMD,
+ "max_nb_queue_pairs=<int> "
+ "max_nb_sessions=<int> "
+ "socket_id=<int>");
diff --git a/src/seastar/dpdk/drivers/crypto/openssl/rte_openssl_pmd_ops.c b/src/seastar/dpdk/drivers/crypto/openssl/rte_openssl_pmd_ops.c
new file mode 100644
index 00000000..22a68730
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/openssl/rte_openssl_pmd_ops.c
@@ -0,0 +1,729 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "rte_openssl_pmd_private.h"
+
+
+static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
+ { /* MD5 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* MD5 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_MD5,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA1 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 20,
+ .max = 20,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA1 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 20,
+ .max = 20,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA224 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 28,
+ .max = 28,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA224 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA224,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 28,
+ .max = 28,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA256 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 32,
+ .max = 32,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA256 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 32,
+ .max = 32,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA384 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 128,
+ .max = 128,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 48,
+ .max = 48,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA384 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA384,
+ .block_size = 128,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 48,
+ .max = 48,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA512 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 128,
+ .max = 128,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA512 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA512,
+ .block_size = 128,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* AES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES CTR */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES GCM (AUTH) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_AES_GCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .aad_size = {
+ .min = 0,
+ .max = 65535,
+ .increment = 1
+ }
+ }, }
+ }, }
+ },
+ { /* AES GCM (CIPHER) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_GCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 16,
+ .increment = 4
+ }
+ }, }
+ }, }
+ },
+ { /* AES GMAC (AUTH) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_AES_GMAC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .aad_size = {
+ .min = 8,
+ .max = 65532,
+ .increment = 4
+ }
+ }, }
+ }, }
+ },
+ { /* 3DES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+ .block_size = 8,
+ .key_size = {
+ .min = 16,
+ .max = 24,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* 3DES CTR */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_CTR,
+ .block_size = 8,
+ .key_size = {
+ .min = 16,
+ .max = 24,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* DES DOCSIS BPI */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_DES_DOCSISBPI,
+ .block_size = 8,
+ .key_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+
+/** Configure device */
+static int
+openssl_pmd_config(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused struct rte_cryptodev_config *config)
+{
+ return 0;
+}
+
+/** Start device */
+static int
+openssl_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+openssl_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+static int
+openssl_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+
+/** Get device statistics */
+static void
+openssl_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct openssl_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->stats.enqueued_count;
+ stats->dequeued_count += qp->stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+openssl_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct openssl_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->stats, 0, sizeof(qp->stats));
+ }
+}
+
+
+/** Get device info */
+static void
+openssl_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct openssl_private *internals = dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->dev_type = dev->dev_type;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = openssl_pmd_capabilities;
+ dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
+ dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+ }
+}
+
+/** Release queue pair */
+static int
+openssl_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ if (dev->data->queue_pairs[qp_id] != NULL) {
+ rte_free(dev->data->queue_pairs[qp_id]);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+ return 0;
+}
+
+/** Set a unique name for the queue pair based on its name, dev_id and qp_id */
+static int
+openssl_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+ struct openssl_qp *qp)
+{
+ unsigned int n = snprintf(qp->name, sizeof(qp->name),
+ "openssl_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+	if (n >= sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+
+/** Create a ring to place processed operations on */
+static struct rte_ring *
+openssl_pmd_qp_create_processed_ops_ring(struct openssl_qp *qp,
+ unsigned int ring_size, int socket_id)
+{
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(qp->name);
+ if (r) {
+ if (rte_ring_get_size(r) >= ring_size) {
+ OPENSSL_LOG_INFO(
+ "Reusing existing ring %s for processed ops",
+ qp->name);
+ return r;
+ }
+
+ OPENSSL_LOG_ERR(
+ "Unable to reuse existing ring %s for processed ops",
+ qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+
+/** Setup a queue pair */
+static int
+openssl_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id)
+{
+ struct openssl_qp *qp = NULL;
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ openssl_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("OPENSSL PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL)
+ return -ENOMEM;
+
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ if (openssl_pmd_qp_set_unique_name(dev, qp))
+ goto qp_setup_cleanup;
+
+ qp->processed_ops = openssl_pmd_qp_create_processed_ops_ring(qp,
+ qp_conf->nb_descriptors, socket_id);
+ if (qp->processed_ops == NULL)
+ goto qp_setup_cleanup;
+
+ qp->sess_mp = dev->data->session_pool;
+
+ memset(&qp->stats, 0, sizeof(qp->stats));
+
+ return 0;
+
+qp_setup_cleanup:
+ if (qp)
+ rte_free(qp);
+
+ return -1;
+}
+
+/** Start queue pair */
+static int
+openssl_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ return -ENOTSUP;
+}
+
+/** Stop queue pair */
+static int
+openssl_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ return -ENOTSUP;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+openssl_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the session structure */
+static unsigned
+openssl_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct openssl_session);
+}
+
+/** Configure the session from a crypto xform chain */
+static void *
+openssl_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_sym_xform *xform, void *sess)
+{
+ if (unlikely(sess == NULL)) {
+ OPENSSL_LOG_ERR("invalid session struct");
+ return NULL;
+ }
+
+ if (openssl_set_session_parameters(
+ sess, xform) != 0) {
+ OPENSSL_LOG_ERR("failed configure session parameters");
+ return NULL;
+ }
+
+ return sess;
+}
+
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+openssl_pmd_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
+{
+ /*
+ * Currently the whole data structure is reset; it needs to be investigated
+ * whether a more selective reset of the key material would be more performant
+ */
+ if (sess) {
+ openssl_reset_session(sess);
+ memset(sess, 0, sizeof(struct openssl_session));
+ }
+}
+
+struct rte_cryptodev_ops openssl_pmd_ops = {
+ .dev_configure = openssl_pmd_config,
+ .dev_start = openssl_pmd_start,
+ .dev_stop = openssl_pmd_stop,
+ .dev_close = openssl_pmd_close,
+
+ .stats_get = openssl_pmd_stats_get,
+ .stats_reset = openssl_pmd_stats_reset,
+
+ .dev_infos_get = openssl_pmd_info_get,
+
+ .queue_pair_setup = openssl_pmd_qp_setup,
+ .queue_pair_release = openssl_pmd_qp_release,
+ .queue_pair_start = openssl_pmd_qp_start,
+ .queue_pair_stop = openssl_pmd_qp_stop,
+ .queue_pair_count = openssl_pmd_qp_count,
+
+ .session_get_size = openssl_pmd_session_get_size,
+ .session_configure = openssl_pmd_session_configure,
+ .session_clear = openssl_pmd_session_clear
+};
+
+struct rte_cryptodev_ops *rte_openssl_pmd_ops = &openssl_pmd_ops;
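+
+/*
+ * A minimal usage sketch, assuming a cryptodev 'dev' allocated by the PMD's
+ * probe path: the driver is expected to assign
+ *   dev->dev_ops = rte_openssl_pmd_ops;
+ * so that the cryptodev framework dispatches configure, queue-pair and
+ * session calls through the function pointers set above.
+ */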
diff --git a/src/seastar/dpdk/drivers/crypto/openssl/rte_openssl_pmd_private.h b/src/seastar/dpdk/drivers/crypto/openssl/rte_openssl_pmd_private.h
new file mode 100644
index 00000000..4d820c51
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/openssl/rte_openssl_pmd_private.h
@@ -0,0 +1,176 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _OPENSSL_PMD_PRIVATE_H_
+#define _OPENSSL_PMD_PRIVATE_H_
+
+#include <openssl/evp.h>
+#include <openssl/des.h>
+
+
+#define OPENSSL_LOG_ERR(fmt, args...) \
+ RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD), \
+ __func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_OPENSSL_DEBUG
+#define OPENSSL_LOG_INFO(fmt, args...) \
+ RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD), \
+ __func__, __LINE__, ## args)
+
+#define OPENSSL_LOG_DBG(fmt, args...) \
+ RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD), \
+ __func__, __LINE__, ## args)
+#else
+#define OPENSSL_LOG_INFO(fmt, args...)
+#define OPENSSL_LOG_DBG(fmt, args...)
+#endif
+
+
+/** OPENSSL operation order mode enumerator */
+enum openssl_chain_order {
+ OPENSSL_CHAIN_ONLY_CIPHER,
+ OPENSSL_CHAIN_ONLY_AUTH,
+ OPENSSL_CHAIN_CIPHER_BPI,
+ OPENSSL_CHAIN_CIPHER_AUTH,
+ OPENSSL_CHAIN_AUTH_CIPHER,
+ OPENSSL_CHAIN_COMBINED,
+ OPENSSL_CHAIN_NOT_SUPPORTED
+};
+
+/** OPENSSL cipher mode enumerator */
+enum openssl_cipher_mode {
+ OPENSSL_CIPHER_LIB,
+ OPENSSL_CIPHER_DES3CTR,
+};
+
+/** OPENSSL auth mode enumerator */
+enum openssl_auth_mode {
+ OPENSSL_AUTH_AS_AUTH,
+ OPENSSL_AUTH_AS_HMAC,
+};
+
+/** Private data structure for each OPENSSL crypto device */
+struct openssl_private {
+ unsigned int max_nb_qpairs;
+ /**< Max number of queue pairs */
+ unsigned int max_nb_sessions;
+ /**< Max number of sessions */
+};
+
+/** OPENSSL crypto queue pair */
+struct openssl_qp {
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_CRYPTODEV_NAME_LEN];
+ /**< Unique Queue Pair Name */
+ struct rte_ring *processed_ops;
+ /**< Ring for placing processed operations */
+ struct rte_mempool *sess_mp;
+ /**< Session Mempool */
+ struct rte_cryptodev_stats stats;
+ /**< Queue pair statistics */
+} __rte_cache_aligned;
+
+/** OPENSSL crypto private session structure */
+struct openssl_session {
+ enum openssl_chain_order chain_order;
+ /**< chain order mode */
+
+ /** Cipher Parameters */
+ struct {
+ enum rte_crypto_cipher_operation direction;
+ /**< cipher operation direction */
+ enum openssl_cipher_mode mode;
+ /**< cipher operation mode */
+ enum rte_crypto_cipher_algorithm algo;
+ /**< cipher algorithm */
+
+ struct {
+ uint8_t data[32];
+ /**< key data */
+ size_t length;
+ /**< key length in bytes */
+ } key;
+
+ const EVP_CIPHER *evp_algo;
+ /**< pointer to EVP algorithm function */
+ EVP_CIPHER_CTX *ctx;
+ /**< pointer to EVP context structure */
+ EVP_CIPHER_CTX *bpi_ctx;
+ } cipher;
+
+ /** Authentication Parameters */
+ struct {
+ enum rte_crypto_auth_operation operation;
+ /**< auth operation generate or verify */
+ enum openssl_auth_mode mode;
+ /**< auth operation mode */
+ enum rte_crypto_auth_algorithm algo;
+ /**< authentication algorithm */
+
+ union {
+ struct {
+ const EVP_MD *evp_algo;
+ /**< pointer to EVP algorithm function */
+ EVP_MD_CTX *ctx;
+ /**< pointer to EVP context structure */
+ } auth;
+
+ struct {
+ EVP_PKEY *pkey;
+ /**< pointer to EVP key */
+ const EVP_MD *evp_algo;
+ /**< pointer to EVP algorithm function */
+ EVP_MD_CTX *ctx;
+ /**< pointer to EVP context structure */
+ } hmac;
+ };
+ } auth;
+
+} __rte_cache_aligned;
+
+/** Set and validate OPENSSL crypto session parameters */
+extern int
+openssl_set_session_parameters(struct openssl_session *sess,
+ const struct rte_crypto_sym_xform *xform);
+
+/** Reset OPENSSL crypto session parameters */
+extern void
+openssl_reset_session(struct openssl_session *sess);
+
+/** Device-specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_openssl_pmd_ops;
+
+#endif /* _OPENSSL_PMD_PRIVATE_H_ */
diff --git a/src/seastar/dpdk/drivers/crypto/openssl/rte_pmd_openssl_version.map b/src/seastar/dpdk/drivers/crypto/openssl/rte_pmd_openssl_version.map
new file mode 100644
index 00000000..cc5829e3
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/openssl/rte_pmd_openssl_version.map
@@ -0,0 +1,3 @@
+DPDK_16.11 {
+ local: *;
+};
diff --git a/src/seastar/dpdk/drivers/crypto/qat/Makefile b/src/seastar/dpdk/drivers/crypto/qat/Makefile
new file mode 100644
index 00000000..7322ffe4
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/qat/Makefile
@@ -0,0 +1,59 @@
+# BSD LICENSE
+#
+# Copyright(c) 2015 Intel Corporation. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_qat.a
+
+# library version
+LIBABIVER := 1
+
+# build flags
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -O3
+
+# external library include paths
+CFLAGS += -I$(SRCDIR)/qat_adf
+LDLIBS += -lcrypto
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat_crypto.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat_qp.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat_adf/qat_algs_build_desc.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += rte_qat_cryptodev.c
+
+# export include files
+SYMLINK-y-include +=
+
+# versioning export map
+EXPORT_MAP := rte_pmd_qat_version.map
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/seastar/dpdk/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h b/src/seastar/dpdk/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h
new file mode 100644
index 00000000..d218f85d
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h
@@ -0,0 +1,175 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ * Copyright(c) 2015 Intel Corporation.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Contact Information:
+ * qat-linux@intel.com
+ *
+ * BSD LICENSE
+ * Copyright(c) 2015 Intel Corporation.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef ADF_TRANSPORT_ACCESS_MACROS_H
+#define ADF_TRANSPORT_ACCESS_MACROS_H
+
+#include <rte_io.h>
+
+/* CSR write macro */
+#define ADF_CSR_WR(csrAddr, csrOffset, val) \
+ rte_write32(val, (((uint8_t *)csrAddr) + csrOffset))
+
+/* CSR read macro */
+#define ADF_CSR_RD(csrAddr, csrOffset) \
+ rte_read32((((uint8_t *)csrAddr) + csrOffset))
+
+#define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL
+#define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL
+#define ADF_RING_CSR_RING_CONFIG 0x000
+#define ADF_RING_CSR_RING_LBASE 0x040
+#define ADF_RING_CSR_RING_UBASE 0x080
+#define ADF_RING_CSR_RING_HEAD 0x0C0
+#define ADF_RING_CSR_RING_TAIL 0x100
+#define ADF_RING_CSR_E_STAT 0x14C
+#define ADF_RING_CSR_INT_SRCSEL 0x174
+#define ADF_RING_CSR_INT_SRCSEL_2 0x178
+#define ADF_RING_CSR_INT_COL_EN 0x17C
+#define ADF_RING_CSR_INT_COL_CTL 0x180
+#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184
+#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000
+#define ADF_RING_BUNDLE_SIZE 0x1000
+#define ADF_RING_CONFIG_NEAR_FULL_WM 0x0A
+#define ADF_RING_CONFIG_NEAR_EMPTY_WM 0x05
+#define ADF_COALESCING_MIN_TIME 0x1FF
+#define ADF_COALESCING_MAX_TIME 0xFFFFF
+#define ADF_COALESCING_DEF_TIME 0x27FF
+#define ADF_RING_NEAR_WATERMARK_512 0x08
+#define ADF_RING_NEAR_WATERMARK_0 0x00
+#define ADF_RING_EMPTY_SIG 0x7F7F7F7F
+
+/* Valid internal ring size values */
+#define ADF_RING_SIZE_128 0x01
+#define ADF_RING_SIZE_256 0x02
+#define ADF_RING_SIZE_512 0x03
+#define ADF_RING_SIZE_4K 0x06
+#define ADF_RING_SIZE_16K 0x08
+#define ADF_RING_SIZE_4M 0x10
+#define ADF_MIN_RING_SIZE ADF_RING_SIZE_128
+#define ADF_MAX_RING_SIZE ADF_RING_SIZE_4M
+#define ADF_DEFAULT_RING_SIZE ADF_RING_SIZE_16K
+
+#define ADF_NUM_BUNDLES_PER_DEV 1
+#define ADF_NUM_SYM_QPS_PER_BUNDLE 2
+
+/* Valid internal msg size values */
+#define ADF_MSG_SIZE_32 0x01
+#define ADF_MSG_SIZE_64 0x02
+#define ADF_MSG_SIZE_128 0x04
+#define ADF_MIN_MSG_SIZE ADF_MSG_SIZE_32
+#define ADF_MAX_MSG_SIZE ADF_MSG_SIZE_128
+
+/* Size to bytes conversion macros for ring and msg size values */
+#define ADF_MSG_SIZE_TO_BYTES(SIZE) (SIZE << 5)
+#define ADF_BYTES_TO_MSG_SIZE(SIZE) (SIZE >> 5)
+#define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7)
+#define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7)
+
+/* Minimum ring buffer size for memory allocation */
+#define ADF_RING_SIZE_BYTES_MIN(SIZE) ((SIZE < ADF_RING_SIZE_4K) ? \
+ ADF_RING_SIZE_4K : SIZE)
+#define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6)
+#define ADF_SIZE_TO_POW(SIZE) ((((SIZE & 0x4) >> 1) | ((SIZE & 0x4) >> 2) | \
+ SIZE) & ~0x4)
+/* Max outstanding requests */
+#define ADF_MAX_INFLIGHTS(RING_SIZE, MSG_SIZE) \
+ ((((1 << (RING_SIZE - 1)) << 3) >> ADF_SIZE_TO_POW(MSG_SIZE)) - 1)
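+/*
+ * A worked example of the size conversions above, using hypothetical values:
+ * for a ring of ADF_RING_SIZE_16K (0x08) holding ADF_MSG_SIZE_64 (0x02)
+ * messages,
+ *   ADF_SIZE_TO_RING_SIZE_IN_BYTES(0x08) = (1 << 7) << 7 = 16384 bytes
+ *   ADF_MSG_SIZE_TO_BYTES(0x02)          = 2 << 5        = 64 bytes
+ *   ADF_MAX_INFLIGHTS(0x08, 0x02)        = (16384 / 64) - 1 = 255 requests
+ */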
+#define BUILD_RING_CONFIG(size) \
+ ((ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_FULL_WM) \
+ | (ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
+ | size)
+#define BUILD_RESP_RING_CONFIG(size, watermark_nf, watermark_ne) \
+ ((watermark_nf << ADF_RING_CONFIG_NEAR_FULL_WM) \
+ | (watermark_ne << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
+ | size)
+#define BUILD_RING_BASE_ADDR(addr, size) \
+ ((addr >> 6) & (0xFFFFFFFFFFFFFFFFULL << size))
+#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \
+ ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_RING_HEAD + (ring << 2))
+#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \
+ ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_RING_TAIL + (ring << 2))
+#define READ_CSR_E_STAT(csr_base_addr, bank) \
+ ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_E_STAT)
+#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_RING_CONFIG + (ring << 2), value)
+#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \
+do { \
+ uint32_t l_base = 0, u_base = 0; \
+ l_base = (uint32_t)(value & 0xFFFFFFFF); \
+ u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32); \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_RING_LBASE + (ring << 2), l_base); \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_RING_UBASE + (ring << 2), u_base); \
+} while (0)
+#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_RING_HEAD + (ring << 2), value)
+#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_RING_TAIL + (ring << 2), value)
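+/*
+ * An illustrative offset calculation for the ring CSR helpers above, with
+ * hypothetical bank/ring numbers: WRITE_CSR_RING_TAIL(base, 0, 1, tail)
+ * writes 'tail' at
+ *   (ADF_RING_BUNDLE_SIZE * 0) + ADF_RING_CSR_RING_TAIL + (1 << 2) = 0x104
+ * from 'base', while bank 1, ring 0 lands at 0x1000 + 0x100 = 0x1100.
+ */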
+#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \
+do { \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK_0); \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_INT_SRCSEL_2, ADF_BANK_INT_SRC_SEL_MASK_X); \
+} while (0)
+#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_INT_COL_EN, value)
+#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_INT_COL_CTL, \
+ ADF_RING_CSR_INT_COL_CTL_ENABLE | value)
+#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_INT_FLAG_AND_COL, value)
+#endif
diff --git a/src/seastar/dpdk/drivers/crypto/qat/qat_adf/icp_qat_fw.h b/src/seastar/dpdk/drivers/crypto/qat/qat_adf/icp_qat_fw.h
new file mode 100644
index 00000000..5de34d55
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/qat/qat_adf/icp_qat_fw.h
@@ -0,0 +1,316 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ * Copyright(c) 2015 Intel Corporation.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Contact Information:
+ * qat-linux@intel.com
+ *
+ * BSD LICENSE
+ * Copyright(c) 2015 Intel Corporation.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _ICP_QAT_FW_H_
+#define _ICP_QAT_FW_H_
+#include <sys/types.h>
+#include "icp_qat_hw.h"
+
+#define QAT_FIELD_SET(flags, val, bitpos, mask) \
+{ (flags) = (((flags) & (~((mask) << (bitpos)))) | \
+ (((val) & (mask)) << (bitpos))) ; }
+
+#define QAT_FIELD_GET(flags, bitpos, mask) \
+ (((flags) >> (bitpos)) & (mask))
+
+#define ICP_QAT_FW_REQ_DEFAULT_SZ 128
+#define ICP_QAT_FW_RESP_DEFAULT_SZ 32
+#define ICP_QAT_FW_COMN_ONE_BYTE_SHIFT 8
+#define ICP_QAT_FW_COMN_SINGLE_BYTE_MASK 0xFF
+#define ICP_QAT_FW_NUM_LONGWORDS_1 1
+#define ICP_QAT_FW_NUM_LONGWORDS_2 2
+#define ICP_QAT_FW_NUM_LONGWORDS_3 3
+#define ICP_QAT_FW_NUM_LONGWORDS_4 4
+#define ICP_QAT_FW_NUM_LONGWORDS_5 5
+#define ICP_QAT_FW_NUM_LONGWORDS_6 6
+#define ICP_QAT_FW_NUM_LONGWORDS_7 7
+#define ICP_QAT_FW_NUM_LONGWORDS_10 10
+#define ICP_QAT_FW_NUM_LONGWORDS_13 13
+#define ICP_QAT_FW_NULL_REQ_SERV_ID 1
+
+enum icp_qat_fw_comn_resp_serv_id {
+ ICP_QAT_FW_COMN_RESP_SERV_NULL,
+ ICP_QAT_FW_COMN_RESP_SERV_CPM_FW,
+ ICP_QAT_FW_COMN_RESP_SERV_DELIMITER
+};
+
+enum icp_qat_fw_comn_request_id {
+ ICP_QAT_FW_COMN_REQ_NULL = 0,
+ ICP_QAT_FW_COMN_REQ_CPM_FW_PKE = 3,
+ ICP_QAT_FW_COMN_REQ_CPM_FW_LA = 4,
+ ICP_QAT_FW_COMN_REQ_CPM_FW_DMA = 7,
+ ICP_QAT_FW_COMN_REQ_CPM_FW_COMP = 9,
+ ICP_QAT_FW_COMN_REQ_DELIMITER
+};
+
+struct icp_qat_fw_comn_req_hdr_cd_pars {
+ union {
+ struct {
+ uint64_t content_desc_addr;
+ uint16_t content_desc_resrvd1;
+ uint8_t content_desc_params_sz;
+ uint8_t content_desc_hdr_resrvd2;
+ uint32_t content_desc_resrvd3;
+ } s;
+ struct {
+ uint32_t serv_specif_fields[4];
+ } s1;
+ } u;
+};
+
+struct icp_qat_fw_comn_req_mid {
+ uint64_t opaque_data;
+ uint64_t src_data_addr;
+ uint64_t dest_data_addr;
+ uint32_t src_length;
+ uint32_t dst_length;
+};
+
+struct icp_qat_fw_comn_req_cd_ctrl {
+ uint32_t content_desc_ctrl_lw[ICP_QAT_FW_NUM_LONGWORDS_5];
+};
+
+struct icp_qat_fw_comn_req_hdr {
+ uint8_t resrvd1;
+ uint8_t service_cmd_id;
+ uint8_t service_type;
+ uint8_t hdr_flags;
+ uint16_t serv_specif_flags;
+ uint16_t comn_req_flags;
+};
+
+struct icp_qat_fw_comn_req_rqpars {
+ uint32_t serv_specif_rqpars_lw[ICP_QAT_FW_NUM_LONGWORDS_13];
+};
+
+struct icp_qat_fw_comn_req {
+ struct icp_qat_fw_comn_req_hdr comn_hdr;
+ struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
+ struct icp_qat_fw_comn_req_mid comn_mid;
+ struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
+ struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
+};
+
+struct icp_qat_fw_comn_error {
+ uint8_t xlat_err_code;
+ uint8_t cmp_err_code;
+};
+
+struct icp_qat_fw_comn_resp_hdr {
+ uint8_t resrvd1;
+ uint8_t service_id;
+ uint8_t response_type;
+ uint8_t hdr_flags;
+ struct icp_qat_fw_comn_error comn_error;
+ uint8_t comn_status;
+ uint8_t cmd_id;
+};
+
+struct icp_qat_fw_comn_resp {
+ struct icp_qat_fw_comn_resp_hdr comn_hdr;
+ uint64_t opaque_data;
+ uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
+};
+
+#define ICP_QAT_FW_COMN_REQ_FLAG_SET 1
+#define ICP_QAT_FW_COMN_REQ_FLAG_CLR 0
+#define ICP_QAT_FW_COMN_VALID_FLAG_BITPOS 7
+#define ICP_QAT_FW_COMN_VALID_FLAG_MASK 0x1
+#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK 0x7F
+
+#define ICP_QAT_FW_COMN_OV_SRV_TYPE_GET(icp_qat_fw_comn_req_hdr_t) \
+ icp_qat_fw_comn_req_hdr_t.service_type
+
+#define ICP_QAT_FW_COMN_OV_SRV_TYPE_SET(icp_qat_fw_comn_req_hdr_t, val) \
+ icp_qat_fw_comn_req_hdr_t.service_type = val
+
+#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_GET(icp_qat_fw_comn_req_hdr_t) \
+ icp_qat_fw_comn_req_hdr_t.service_cmd_id
+
+#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_SET(icp_qat_fw_comn_req_hdr_t, val) \
+ icp_qat_fw_comn_req_hdr_t.service_cmd_id = val
+
+#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_GET(hdr_t) \
+ ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_t.hdr_flags)
+
+#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_SET(hdr_t, val) \
+ ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val)
+
+#define ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_flags) \
+ QAT_FIELD_GET(hdr_flags, \
+ ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \
+ ICP_QAT_FW_COMN_VALID_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_GET(hdr_flags) \
+ (hdr_flags & ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK)
+
+#define ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val) \
+ QAT_FIELD_SET((hdr_t.hdr_flags), (val), \
+ ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \
+ ICP_QAT_FW_COMN_VALID_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(valid) \
+ (((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \
+ ICP_QAT_FW_COMN_VALID_FLAG_BITPOS)
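+
+/*
+ * A minimal sketch of how the valid-flag helpers above compose, assuming a
+ * request header 'hdr' local to the caller:
+ *   hdr.hdr_flags = ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+ *   => hdr.hdr_flags == 0x80 (bit 7 set)
+ *   ICP_QAT_FW_COMN_HDR_VALID_FLAG_GET(hdr) then returns 1.
+ */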
+
+#define QAT_COMN_PTR_TYPE_BITPOS 0
+#define QAT_COMN_PTR_TYPE_MASK 0x1
+#define QAT_COMN_CD_FLD_TYPE_BITPOS 1
+#define QAT_COMN_CD_FLD_TYPE_MASK 0x1
+#define QAT_COMN_PTR_TYPE_FLAT 0x0
+#define QAT_COMN_PTR_TYPE_SGL 0x1
+#define QAT_COMN_CD_FLD_TYPE_64BIT_ADR 0x0
+#define QAT_COMN_CD_FLD_TYPE_16BYTE_DATA 0x1
+
+#define ICP_QAT_FW_COMN_FLAGS_BUILD(cdt, ptr) \
+ ((((cdt) & QAT_COMN_CD_FLD_TYPE_MASK) << QAT_COMN_CD_FLD_TYPE_BITPOS) \
+ | (((ptr) & QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS))
+
+#define ICP_QAT_FW_COMN_PTR_TYPE_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_COMN_PTR_TYPE_BITPOS, QAT_COMN_PTR_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_CD_FLD_TYPE_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_COMN_CD_FLD_TYPE_BITPOS, \
+ QAT_COMN_CD_FLD_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_PTR_TYPE_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_COMN_PTR_TYPE_BITPOS, \
+ QAT_COMN_PTR_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_CD_FLD_TYPE_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_COMN_CD_FLD_TYPE_BITPOS, \
+ QAT_COMN_CD_FLD_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_NEXT_ID_BITPOS 4
+#define ICP_QAT_FW_COMN_NEXT_ID_MASK 0xF0
+#define ICP_QAT_FW_COMN_CURR_ID_BITPOS 0
+#define ICP_QAT_FW_COMN_CURR_ID_MASK 0x0F
+
+#define ICP_QAT_FW_COMN_NEXT_ID_GET(cd_ctrl_hdr_t) \
+ ((((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \
+ >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
+
+#define ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+ { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \
+ & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+ ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
+ & ICP_QAT_FW_COMN_NEXT_ID_MASK)); }
+
+#define ICP_QAT_FW_COMN_CURR_ID_GET(cd_ctrl_hdr_t) \
+ (((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_CURR_ID_MASK)
+
+#define ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+ { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \
+ & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+ ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)); }
+
+#define QAT_COMN_RESP_CRYPTO_STATUS_BITPOS 7
+#define QAT_COMN_RESP_CRYPTO_STATUS_MASK 0x1
+#define QAT_COMN_RESP_CMP_STATUS_BITPOS 5
+#define QAT_COMN_RESP_CMP_STATUS_MASK 0x1
+#define QAT_COMN_RESP_XLAT_STATUS_BITPOS 4
+#define QAT_COMN_RESP_XLAT_STATUS_MASK 0x1
+#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS 3
+#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK 0x1
+
+#define ICP_QAT_FW_COMN_RESP_STATUS_BUILD(crypto, comp, xlat, eolb) \
+ ((((crypto) & QAT_COMN_RESP_CRYPTO_STATUS_MASK) << \
+ QAT_COMN_RESP_CRYPTO_STATUS_BITPOS) | \
+ (((comp) & QAT_COMN_RESP_CMP_STATUS_MASK) << \
+ QAT_COMN_RESP_CMP_STATUS_BITPOS) | \
+ (((xlat) & QAT_COMN_RESP_XLAT_STATUS_MASK) << \
+ QAT_COMN_RESP_XLAT_STATUS_BITPOS) | \
+ (((eolb) & QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK) << \
+ QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS))
+
+#define ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(status) \
+ QAT_FIELD_GET(status, QAT_COMN_RESP_CRYPTO_STATUS_BITPOS, \
+ QAT_COMN_RESP_CRYPTO_STATUS_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(status) \
+ QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_STATUS_BITPOS, \
+ QAT_COMN_RESP_CMP_STATUS_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(status) \
+ QAT_FIELD_GET(status, QAT_COMN_RESP_XLAT_STATUS_BITPOS, \
+ QAT_COMN_RESP_XLAT_STATUS_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_CMP_END_OF_LAST_BLK_FLAG_GET(status) \
+ QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS, \
+ QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK)
+
+#define ICP_QAT_FW_COMN_STATUS_FLAG_OK 0
+#define ICP_QAT_FW_COMN_STATUS_FLAG_ERROR 1
+#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR 0
+#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_SET 1
+#define ERR_CODE_NO_ERROR 0
+#define ERR_CODE_INVALID_BLOCK_TYPE -1
+#define ERR_CODE_NO_MATCH_ONES_COMP -2
+#define ERR_CODE_TOO_MANY_LEN_OR_DIS -3
+#define ERR_CODE_INCOMPLETE_LEN -4
+#define ERR_CODE_RPT_LEN_NO_FIRST_LEN -5
+#define ERR_CODE_RPT_GT_SPEC_LEN -6
+#define ERR_CODE_INV_LIT_LEN_CODE_LEN -7
+#define ERR_CODE_INV_DIS_CODE_LEN -8
+#define ERR_CODE_INV_LIT_LEN_DIS_IN_BLK -9
+#define ERR_CODE_DIS_TOO_FAR_BACK -10
+#define ERR_CODE_OVERFLOW_ERROR -11
+#define ERR_CODE_SOFT_ERROR -12
+#define ERR_CODE_FATAL_ERROR -13
+#define ERR_CODE_SSM_ERROR -14
+#define ERR_CODE_ENDPOINT_ERROR -15
+
+enum icp_qat_fw_slice {
+ ICP_QAT_FW_SLICE_NULL = 0,
+ ICP_QAT_FW_SLICE_CIPHER = 1,
+ ICP_QAT_FW_SLICE_AUTH = 2,
+ ICP_QAT_FW_SLICE_DRAM_RD = 3,
+ ICP_QAT_FW_SLICE_DRAM_WR = 4,
+ ICP_QAT_FW_SLICE_COMP = 5,
+ ICP_QAT_FW_SLICE_XLAT = 6,
+ ICP_QAT_FW_SLICE_DELIMITER
+};
+#endif
diff --git a/src/seastar/dpdk/drivers/crypto/qat/qat_adf/icp_qat_fw_la.h b/src/seastar/dpdk/drivers/crypto/qat/qat_adf/icp_qat_fw_la.h
new file mode 100644
index 00000000..fbf2b839
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/qat/qat_adf/icp_qat_fw_la.h
@@ -0,0 +1,404 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ * Copyright(c) 2015 Intel Corporation.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Contact Information:
+ * qat-linux@intel.com
+ *
+ * BSD LICENSE
+ * Copyright(c) 2015 Intel Corporation.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _ICP_QAT_FW_LA_H_
+#define _ICP_QAT_FW_LA_H_
+#include "icp_qat_fw.h"
+
+enum icp_qat_fw_la_cmd_id {
+ ICP_QAT_FW_LA_CMD_CIPHER = 0,
+ ICP_QAT_FW_LA_CMD_AUTH = 1,
+ ICP_QAT_FW_LA_CMD_CIPHER_HASH = 2,
+ ICP_QAT_FW_LA_CMD_HASH_CIPHER = 3,
+ ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM = 4,
+ ICP_QAT_FW_LA_CMD_TRNG_TEST = 5,
+ ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE = 6,
+ ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE = 7,
+ ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE = 8,
+ ICP_QAT_FW_LA_CMD_MGF1 = 9,
+ ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP = 10,
+ ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP = 11,
+ ICP_QAT_FW_LA_CMD_DELIMITER = 12
+};
+
+#define ICP_QAT_FW_LA_ICV_VER_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
+#define ICP_QAT_FW_LA_ICV_VER_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
+#define ICP_QAT_FW_LA_TRNG_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
+#define ICP_QAT_FW_LA_TRNG_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
+
+struct icp_qat_fw_la_bulk_req {
+ struct icp_qat_fw_comn_req_hdr comn_hdr;
+ struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
+ struct icp_qat_fw_comn_req_mid comn_mid;
+ struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
+ struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
+};
+
+#define ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS 1
+#define ICP_QAT_FW_LA_GCM_IV_LEN_NOT_12_OCTETS 0
+#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS 12
+#define ICP_QAT_FW_LA_ZUC_3G_PROTO 1
+#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK 0x1
+#define QAT_LA_GCM_IV_LEN_FLAG_BITPOS 11
+#define QAT_LA_GCM_IV_LEN_FLAG_MASK 0x1
+#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER 1
+#define ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER 0
+#define QAT_LA_DIGEST_IN_BUFFER_BITPOS 10
+#define QAT_LA_DIGEST_IN_BUFFER_MASK 0x1
+#define ICP_QAT_FW_LA_SNOW_3G_PROTO 4
+#define ICP_QAT_FW_LA_GCM_PROTO 2
+#define ICP_QAT_FW_LA_CCM_PROTO 1
+#define ICP_QAT_FW_LA_NO_PROTO 0
+#define QAT_LA_PROTO_BITPOS 7
+#define QAT_LA_PROTO_MASK 0x7
+#define ICP_QAT_FW_LA_CMP_AUTH_RES 1
+#define ICP_QAT_FW_LA_NO_CMP_AUTH_RES 0
+#define QAT_LA_CMP_AUTH_RES_BITPOS 6
+#define QAT_LA_CMP_AUTH_RES_MASK 0x1
+#define ICP_QAT_FW_LA_RET_AUTH_RES 1
+#define ICP_QAT_FW_LA_NO_RET_AUTH_RES 0
+#define QAT_LA_RET_AUTH_RES_BITPOS 5
+#define QAT_LA_RET_AUTH_RES_MASK 0x1
+#define ICP_QAT_FW_LA_UPDATE_STATE 1
+#define ICP_QAT_FW_LA_NO_UPDATE_STATE 0
+#define QAT_LA_UPDATE_STATE_BITPOS 4
+#define QAT_LA_UPDATE_STATE_MASK 0x1
+#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_CD_SETUP 0
+#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_SHRAM_CP 1
+#define QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS 3
+#define QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK 0x1
+#define ICP_QAT_FW_CIPH_IV_64BIT_PTR 0
+#define ICP_QAT_FW_CIPH_IV_16BYTE_DATA 1
+#define QAT_LA_CIPH_IV_FLD_BITPOS 2
+#define QAT_LA_CIPH_IV_FLD_MASK 0x1
+#define ICP_QAT_FW_LA_PARTIAL_NONE 0
+#define ICP_QAT_FW_LA_PARTIAL_START 1
+#define ICP_QAT_FW_LA_PARTIAL_MID 3
+#define ICP_QAT_FW_LA_PARTIAL_END 2
+#define QAT_LA_PARTIAL_BITPOS 0
+#define QAT_LA_PARTIAL_MASK 0x3
+#define ICP_QAT_FW_LA_FLAGS_BUILD(zuc_proto, gcm_iv_len, auth_rslt, proto, \
+ cmp_auth, ret_auth, update_state, \
+ ciph_iv, ciphcfg, partial) \
+ (((zuc_proto & QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) << \
+ QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS) | \
+ ((gcm_iv_len & QAT_LA_GCM_IV_LEN_FLAG_MASK) << \
+ QAT_LA_GCM_IV_LEN_FLAG_BITPOS) | \
+ ((auth_rslt & QAT_LA_DIGEST_IN_BUFFER_MASK) << \
+ QAT_LA_DIGEST_IN_BUFFER_BITPOS) | \
+ ((proto & QAT_LA_PROTO_MASK) << \
+ QAT_LA_PROTO_BITPOS) | \
+ ((cmp_auth & QAT_LA_CMP_AUTH_RES_MASK) << \
+ QAT_LA_CMP_AUTH_RES_BITPOS) | \
+ ((ret_auth & QAT_LA_RET_AUTH_RES_MASK) << \
+ QAT_LA_RET_AUTH_RES_BITPOS) | \
+ ((update_state & QAT_LA_UPDATE_STATE_MASK) << \
+ QAT_LA_UPDATE_STATE_BITPOS) | \
+ ((ciph_iv & QAT_LA_CIPH_IV_FLD_MASK) << \
+ QAT_LA_CIPH_IV_FLD_BITPOS) | \
+ ((ciphcfg & QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) << \
+ QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS) | \
+ ((partial & QAT_LA_PARTIAL_MASK) << \
+ QAT_LA_PARTIAL_BITPOS))
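+
+/*
+ * An illustrative use of the per-field setters defined below, with a
+ * hypothetical 'flags' word: ICP_QAT_FW_LA_PROTO_SET(flags,
+ * ICP_QAT_FW_LA_GCM_PROTO) writes the value 2 into the 3-bit proto field at
+ * bit 7, leaving the proto bits of 'flags' equal to 0x100.
+ */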
+
+#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_CIPH_IV_FLD_BITPOS, \
+ QAT_LA_CIPH_IV_FLD_MASK)
+
+#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \
+ QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK)
+
+#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \
+ QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \
+ QAT_LA_GCM_IV_LEN_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_PROTO_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_PROTO_BITPOS, QAT_LA_PROTO_MASK)
+
+#define ICP_QAT_FW_LA_CMP_AUTH_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_CMP_AUTH_RES_BITPOS, \
+ QAT_LA_CMP_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_RET_AUTH_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_RET_AUTH_RES_BITPOS, \
+ QAT_LA_RET_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \
+ QAT_LA_DIGEST_IN_BUFFER_MASK)
+
+#define ICP_QAT_FW_LA_UPDATE_STATE_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_UPDATE_STATE_BITPOS, \
+ QAT_LA_UPDATE_STATE_MASK)
+
+#define ICP_QAT_FW_LA_PARTIAL_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_PARTIAL_BITPOS, \
+ QAT_LA_PARTIAL_MASK)
+
+#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_CIPH_IV_FLD_BITPOS, \
+ QAT_LA_CIPH_IV_FLD_MASK)
+
+#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \
+ QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK)
+
+#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \
+ QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \
+ QAT_LA_GCM_IV_LEN_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_PROTO_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_PROTO_BITPOS, \
+ QAT_LA_PROTO_MASK)
+
+#define ICP_QAT_FW_LA_CMP_AUTH_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_CMP_AUTH_RES_BITPOS, \
+ QAT_LA_CMP_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_RET_AUTH_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_RET_AUTH_RES_BITPOS, \
+ QAT_LA_RET_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \
+ QAT_LA_DIGEST_IN_BUFFER_MASK)
+
+#define ICP_QAT_FW_LA_UPDATE_STATE_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_UPDATE_STATE_BITPOS, \
+ QAT_LA_UPDATE_STATE_MASK)
+
+#define ICP_QAT_FW_LA_PARTIAL_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_PARTIAL_BITPOS, \
+ QAT_LA_PARTIAL_MASK)
+
+struct icp_qat_fw_cipher_req_hdr_cd_pars {
+ union {
+ struct {
+ uint64_t content_desc_addr;
+ uint16_t content_desc_resrvd1;
+ uint8_t content_desc_params_sz;
+ uint8_t content_desc_hdr_resrvd2;
+ uint32_t content_desc_resrvd3;
+ } s;
+ struct {
+ uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+ } s1;
+ } u;
+};
+
+struct icp_qat_fw_cipher_auth_req_hdr_cd_pars {
+ union {
+ struct {
+ uint64_t content_desc_addr;
+ uint16_t content_desc_resrvd1;
+ uint8_t content_desc_params_sz;
+ uint8_t content_desc_hdr_resrvd2;
+ uint32_t content_desc_resrvd3;
+ } s;
+ struct {
+ uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+ } sl;
+ } u;
+};
+
+struct icp_qat_fw_cipher_cd_ctrl_hdr {
+ uint8_t cipher_state_sz;
+ uint8_t cipher_key_sz;
+ uint8_t cipher_cfg_offset;
+ uint8_t next_curr_id;
+ uint8_t cipher_padding_sz;
+ uint8_t resrvd1;
+ uint16_t resrvd2;
+ uint32_t resrvd3[ICP_QAT_FW_NUM_LONGWORDS_3];
+};
+
+struct icp_qat_fw_auth_cd_ctrl_hdr {
+ uint32_t resrvd1;
+ uint8_t resrvd2;
+ uint8_t hash_flags;
+ uint8_t hash_cfg_offset;
+ uint8_t next_curr_id;
+ uint8_t resrvd3;
+ uint8_t outer_prefix_sz;
+ uint8_t final_sz;
+ uint8_t inner_res_sz;
+ uint8_t resrvd4;
+ uint8_t inner_state1_sz;
+ uint8_t inner_state2_offset;
+ uint8_t inner_state2_sz;
+ uint8_t outer_config_offset;
+ uint8_t outer_state1_sz;
+ uint8_t outer_res_sz;
+ uint8_t outer_prefix_offset;
+};
+
+struct icp_qat_fw_cipher_auth_cd_ctrl_hdr {
+ uint8_t cipher_state_sz;
+ uint8_t cipher_key_sz;
+ uint8_t cipher_cfg_offset;
+ uint8_t next_curr_id_cipher;
+ uint8_t cipher_padding_sz;
+ uint8_t hash_flags;
+ uint8_t hash_cfg_offset;
+ uint8_t next_curr_id_auth;
+ uint8_t resrvd1;
+ uint8_t outer_prefix_sz;
+ uint8_t final_sz;
+ uint8_t inner_res_sz;
+ uint8_t resrvd2;
+ uint8_t inner_state1_sz;
+ uint8_t inner_state2_offset;
+ uint8_t inner_state2_sz;
+ uint8_t outer_config_offset;
+ uint8_t outer_state1_sz;
+ uint8_t outer_res_sz;
+ uint8_t outer_prefix_offset;
+};
+
+#define ICP_QAT_FW_AUTH_HDR_FLAG_DO_NESTED 1
+#define ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED 0
+#define ICP_QAT_FW_CCM_GCM_AAD_SZ_MAX 240
+#define ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET \
+ (sizeof(struct icp_qat_fw_la_cipher_req_params))
+#define ICP_QAT_FW_CIPHER_REQUEST_PARAMETERS_OFFSET (0)
+
+struct icp_qat_fw_la_cipher_req_params {
+ uint32_t cipher_offset;
+ uint32_t cipher_length;
+ union {
+ uint32_t cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+ struct {
+ uint64_t cipher_IV_ptr;
+ uint64_t resrvd1;
+ } s;
+ } u;
+};
+
+struct icp_qat_fw_la_auth_req_params {
+ uint32_t auth_off;
+ uint32_t auth_len;
+ union {
+ uint64_t auth_partial_st_prefix;
+ uint64_t aad_adr;
+ } u1;
+ uint64_t auth_res_addr;
+ union {
+ uint8_t inner_prefix_sz;
+ uint8_t aad_sz;
+ } u2;
+ uint8_t resrvd1;
+ uint8_t hash_state_sz;
+ uint8_t auth_res_sz;
+} __rte_packed;
+
+struct icp_qat_fw_la_auth_req_params_resrvd_flds {
+ uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_6];
+ union {
+ uint8_t inner_prefix_sz;
+ uint8_t aad_sz;
+ } u2;
+ uint8_t resrvd1;
+ uint16_t resrvd2;
+};
+
+struct icp_qat_fw_la_resp {
+ struct icp_qat_fw_comn_resp_hdr comn_resp;
+ uint64_t opaque_data;
+ uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
+};
+
+#define ICP_QAT_FW_CIPHER_NEXT_ID_GET(cd_ctrl_hdr_t) \
+ ((((cd_ctrl_hdr_t)->next_curr_id_cipher) & \
+ ICP_QAT_FW_COMN_NEXT_ID_MASK) >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
+
+#define ICP_QAT_FW_CIPHER_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \
+ ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \
+ & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+ ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
+ & ICP_QAT_FW_COMN_NEXT_ID_MASK)) }
+
+#define ICP_QAT_FW_CIPHER_CURR_ID_GET(cd_ctrl_hdr_t) \
+ (((cd_ctrl_hdr_t)->next_curr_id_cipher) \
+ & ICP_QAT_FW_COMN_CURR_ID_MASK)
+
+#define ICP_QAT_FW_CIPHER_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \
+ ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \
+ & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+ ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) }
+
+#define ICP_QAT_FW_AUTH_NEXT_ID_GET(cd_ctrl_hdr_t) \
+ ((((cd_ctrl_hdr_t)->next_curr_id_auth) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \
+ >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
+
+#define ICP_QAT_FW_AUTH_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_auth = \
+ ((((cd_ctrl_hdr_t)->next_curr_id_auth) \
+ & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+ ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
+ & ICP_QAT_FW_COMN_NEXT_ID_MASK)) }
+
+#define ICP_QAT_FW_AUTH_CURR_ID_GET(cd_ctrl_hdr_t) \
+ (((cd_ctrl_hdr_t)->next_curr_id_auth) \
+ & ICP_QAT_FW_COMN_CURR_ID_MASK)
+
+#define ICP_QAT_FW_AUTH_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_auth = \
+ ((((cd_ctrl_hdr_t)->next_curr_id_auth) \
+ & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+ ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) }
+
+#endif
diff --git a/src/seastar/dpdk/drivers/crypto/qat/qat_adf/icp_qat_hw.h b/src/seastar/dpdk/drivers/crypto/qat/qat_adf/icp_qat_hw.h
new file mode 100644
index 00000000..ebe245f6
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/qat/qat_adf/icp_qat_hw.h
@@ -0,0 +1,309 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ * Copyright(c) 2015 Intel Corporation.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Contact Information:
+ * qat-linux@intel.com
+ *
+ * BSD LICENSE
+ * Copyright(c) 2015 Intel Corporation.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _ICP_QAT_HW_H_
+#define _ICP_QAT_HW_H_
+
+enum icp_qat_hw_ae_id {
+ ICP_QAT_HW_AE_0 = 0,
+ ICP_QAT_HW_AE_1 = 1,
+ ICP_QAT_HW_AE_2 = 2,
+ ICP_QAT_HW_AE_3 = 3,
+ ICP_QAT_HW_AE_4 = 4,
+ ICP_QAT_HW_AE_5 = 5,
+ ICP_QAT_HW_AE_6 = 6,
+ ICP_QAT_HW_AE_7 = 7,
+ ICP_QAT_HW_AE_8 = 8,
+ ICP_QAT_HW_AE_9 = 9,
+ ICP_QAT_HW_AE_10 = 10,
+ ICP_QAT_HW_AE_11 = 11,
+ ICP_QAT_HW_AE_DELIMITER = 12
+};
+
+enum icp_qat_hw_qat_id {
+ ICP_QAT_HW_QAT_0 = 0,
+ ICP_QAT_HW_QAT_1 = 1,
+ ICP_QAT_HW_QAT_2 = 2,
+ ICP_QAT_HW_QAT_3 = 3,
+ ICP_QAT_HW_QAT_4 = 4,
+ ICP_QAT_HW_QAT_5 = 5,
+ ICP_QAT_HW_QAT_DELIMITER = 6
+};
+
+enum icp_qat_hw_auth_algo {
+ ICP_QAT_HW_AUTH_ALGO_NULL = 0,
+ ICP_QAT_HW_AUTH_ALGO_SHA1 = 1,
+ ICP_QAT_HW_AUTH_ALGO_MD5 = 2,
+ ICP_QAT_HW_AUTH_ALGO_SHA224 = 3,
+ ICP_QAT_HW_AUTH_ALGO_SHA256 = 4,
+ ICP_QAT_HW_AUTH_ALGO_SHA384 = 5,
+ ICP_QAT_HW_AUTH_ALGO_SHA512 = 6,
+ ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC = 7,
+ ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC = 8,
+ ICP_QAT_HW_AUTH_ALGO_AES_F9 = 9,
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_128 = 10,
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_64 = 11,
+ ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 = 12,
+ ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 = 13,
+ ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 = 14,
+ ICP_QAT_HW_AUTH_RESERVED_1 = 15,
+ ICP_QAT_HW_AUTH_RESERVED_2 = 16,
+ ICP_QAT_HW_AUTH_ALGO_SHA3_256 = 17,
+ ICP_QAT_HW_AUTH_RESERVED_3 = 18,
+ ICP_QAT_HW_AUTH_ALGO_SHA3_512 = 19,
+ ICP_QAT_HW_AUTH_ALGO_DELIMITER = 20
+};
+
+enum icp_qat_hw_auth_mode {
+ ICP_QAT_HW_AUTH_MODE0 = 0,
+ ICP_QAT_HW_AUTH_MODE1 = 1,
+ ICP_QAT_HW_AUTH_MODE2 = 2,
+ ICP_QAT_HW_AUTH_MODE_DELIMITER = 3
+};
+
+struct icp_qat_hw_auth_config {
+ uint32_t config;
+ uint32_t reserved;
+};
+
+#define QAT_AUTH_MODE_BITPOS 4
+#define QAT_AUTH_MODE_MASK 0xF
+#define QAT_AUTH_ALGO_BITPOS 0
+#define QAT_AUTH_ALGO_MASK 0xF
+#define QAT_AUTH_CMP_BITPOS 8
+#define QAT_AUTH_CMP_MASK 0x7F
+#define QAT_AUTH_SHA3_PADDING_BITPOS 16
+#define QAT_AUTH_SHA3_PADDING_MASK 0x1
+#define QAT_AUTH_ALGO_SHA3_BITPOS 22
+#define QAT_AUTH_ALGO_SHA3_MASK 0x3
+#define ICP_QAT_HW_AUTH_CONFIG_BUILD(mode, algo, cmp_len) \
+ (((mode & QAT_AUTH_MODE_MASK) << QAT_AUTH_MODE_BITPOS) | \
+ ((algo & QAT_AUTH_ALGO_MASK) << QAT_AUTH_ALGO_BITPOS) | \
+ (((algo >> 4) & QAT_AUTH_ALGO_SHA3_MASK) << \
+ QAT_AUTH_ALGO_SHA3_BITPOS) | \
+ (((((algo == ICP_QAT_HW_AUTH_ALGO_SHA3_256) || \
+ (algo == ICP_QAT_HW_AUTH_ALGO_SHA3_512)) ? 1 : 0) \
+ & QAT_AUTH_SHA3_PADDING_MASK) << QAT_AUTH_SHA3_PADDING_BITPOS) | \
+ ((cmp_len & QAT_AUTH_CMP_MASK) << QAT_AUTH_CMP_BITPOS))
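+
+/*
+ * A worked example of the config word above, with hypothetical session
+ * parameters: for ICP_QAT_HW_AUTH_MODE1, ICP_QAT_HW_AUTH_ALGO_SHA256 and a
+ * 32-byte digest comparison length,
+ *   ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
+ *       ICP_QAT_HW_AUTH_ALGO_SHA256, 32)
+ *   = (1 << 4) | 4 | (32 << 8) = 0x2014
+ */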
+
+struct icp_qat_hw_auth_counter {
+ uint32_t counter;
+ uint32_t reserved;
+};
+
+#define QAT_AUTH_COUNT_MASK 0xFFFFFFFF
+#define QAT_AUTH_COUNT_BITPOS 0
+#define ICP_QAT_HW_AUTH_COUNT_BUILD(val) \
+ (((val) & QAT_AUTH_COUNT_MASK) << QAT_AUTH_COUNT_BITPOS)
+
+struct icp_qat_hw_auth_setup {
+ struct icp_qat_hw_auth_config auth_config;
+ struct icp_qat_hw_auth_counter auth_counter;
+};
+
+#define QAT_HW_DEFAULT_ALIGNMENT 8
+#define QAT_HW_ROUND_UP(val, n) (((val) + ((n) - 1)) & (~(n - 1)))
+#define ICP_QAT_HW_NULL_STATE1_SZ 32
+#define ICP_QAT_HW_MD5_STATE1_SZ 16
+#define ICP_QAT_HW_SHA1_STATE1_SZ 20
+#define ICP_QAT_HW_SHA224_STATE1_SZ 32
+#define ICP_QAT_HW_SHA256_STATE1_SZ 32
+#define ICP_QAT_HW_SHA3_256_STATE1_SZ 32
+#define ICP_QAT_HW_SHA384_STATE1_SZ 64
+#define ICP_QAT_HW_SHA512_STATE1_SZ 64
+#define ICP_QAT_HW_SHA3_512_STATE1_SZ 64
+#define ICP_QAT_HW_SHA3_224_STATE1_SZ 28
+#define ICP_QAT_HW_SHA3_384_STATE1_SZ 48
+#define ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ 16
+#define ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ 16
+#define ICP_QAT_HW_AES_F9_STATE1_SZ 32
+#define ICP_QAT_HW_KASUMI_F9_STATE1_SZ 16
+#define ICP_QAT_HW_GALOIS_128_STATE1_SZ 16
+#define ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ 8
+#define ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ 8
+#define ICP_QAT_HW_NULL_STATE2_SZ 32
+#define ICP_QAT_HW_MD5_STATE2_SZ 16
+#define ICP_QAT_HW_SHA1_STATE2_SZ 20
+#define ICP_QAT_HW_SHA224_STATE2_SZ 32
+#define ICP_QAT_HW_SHA256_STATE2_SZ 32
+#define ICP_QAT_HW_SHA3_256_STATE2_SZ 0
+#define ICP_QAT_HW_SHA384_STATE2_SZ 64
+#define ICP_QAT_HW_SHA512_STATE2_SZ 64
+#define ICP_QAT_HW_SHA3_512_STATE2_SZ 0
+#define ICP_QAT_HW_SHA3_224_STATE2_SZ 0
+#define ICP_QAT_HW_SHA3_384_STATE2_SZ 0
+#define ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ 48
+#define ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ 16
+#define ICP_QAT_HW_AES_CBC_MAC_KEY_SZ 16
+#define ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ 16
+#define ICP_QAT_HW_F9_IK_SZ 16
+#define ICP_QAT_HW_F9_FK_SZ 16
+#define ICP_QAT_HW_KASUMI_F9_STATE2_SZ (ICP_QAT_HW_F9_IK_SZ + \
+ ICP_QAT_HW_F9_FK_SZ)
+#define ICP_QAT_HW_AES_F9_STATE2_SZ ICP_QAT_HW_KASUMI_F9_STATE2_SZ
+#define ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ 24
+#define ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ 32
+#define ICP_QAT_HW_GALOIS_H_SZ 16
+#define ICP_QAT_HW_GALOIS_LEN_A_SZ 8
+#define ICP_QAT_HW_GALOIS_E_CTR0_SZ 16
+
+struct icp_qat_hw_auth_sha512 {
+ struct icp_qat_hw_auth_setup inner_setup;
+ uint8_t state1[ICP_QAT_HW_SHA512_STATE1_SZ];
+ struct icp_qat_hw_auth_setup outer_setup;
+ uint8_t state2[ICP_QAT_HW_SHA512_STATE2_SZ];
+};
+
+struct icp_qat_hw_auth_algo_blk {
+ struct icp_qat_hw_auth_sha512 sha;
+};
+
+#define ICP_QAT_HW_GALOIS_LEN_A_BITPOS 0
+#define ICP_QAT_HW_GALOIS_LEN_A_MASK 0xFFFFFFFF
+
+enum icp_qat_hw_cipher_algo {
+ ICP_QAT_HW_CIPHER_ALGO_NULL = 0,
+ ICP_QAT_HW_CIPHER_ALGO_DES = 1,
+ ICP_QAT_HW_CIPHER_ALGO_3DES = 2,
+ ICP_QAT_HW_CIPHER_ALGO_AES128 = 3,
+ ICP_QAT_HW_CIPHER_ALGO_AES192 = 4,
+ ICP_QAT_HW_CIPHER_ALGO_AES256 = 5,
+ ICP_QAT_HW_CIPHER_ALGO_ARC4 = 6,
+ ICP_QAT_HW_CIPHER_ALGO_KASUMI = 7,
+ ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 = 8,
+ ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3 = 9,
+ ICP_QAT_HW_CIPHER_DELIMITER = 10
+};
+
+enum icp_qat_hw_cipher_mode {
+ ICP_QAT_HW_CIPHER_ECB_MODE = 0,
+ ICP_QAT_HW_CIPHER_CBC_MODE = 1,
+ ICP_QAT_HW_CIPHER_CTR_MODE = 2,
+ ICP_QAT_HW_CIPHER_F8_MODE = 3,
+ ICP_QAT_HW_CIPHER_XTS_MODE = 6,
+ ICP_QAT_HW_CIPHER_MODE_DELIMITER = 7
+};
+
+struct icp_qat_hw_cipher_config {
+ uint32_t val;
+ uint32_t reserved;
+};
+
+enum icp_qat_hw_cipher_dir {
+ ICP_QAT_HW_CIPHER_ENCRYPT = 0,
+ ICP_QAT_HW_CIPHER_DECRYPT = 1,
+};
+
+enum icp_qat_hw_auth_op {
+ ICP_QAT_HW_AUTH_VERIFY = 0,
+ ICP_QAT_HW_AUTH_GENERATE = 1,
+};
+
+enum icp_qat_hw_cipher_convert {
+ ICP_QAT_HW_CIPHER_NO_CONVERT = 0,
+ ICP_QAT_HW_CIPHER_KEY_CONVERT = 1,
+};
+
+#define QAT_CIPHER_MODE_BITPOS 4
+#define QAT_CIPHER_MODE_MASK 0xF
+#define QAT_CIPHER_ALGO_BITPOS 0
+#define QAT_CIPHER_ALGO_MASK 0xF
+#define QAT_CIPHER_CONVERT_BITPOS 9
+#define QAT_CIPHER_CONVERT_MASK 0x1
+#define QAT_CIPHER_DIR_BITPOS 8
+#define QAT_CIPHER_DIR_MASK 0x1
+#define QAT_CIPHER_MODE_F8_KEY_SZ_MULT 2
+#define QAT_CIPHER_MODE_XTS_KEY_SZ_MULT 2
+#define ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, algo, convert, dir) \
+ (((mode & QAT_CIPHER_MODE_MASK) << QAT_CIPHER_MODE_BITPOS) | \
+ ((algo & QAT_CIPHER_ALGO_MASK) << QAT_CIPHER_ALGO_BITPOS) | \
+ ((convert & QAT_CIPHER_CONVERT_MASK) << QAT_CIPHER_CONVERT_BITPOS) | \
+ ((dir & QAT_CIPHER_DIR_MASK) << QAT_CIPHER_DIR_BITPOS))
+#define ICP_QAT_HW_DES_BLK_SZ 8
+#define ICP_QAT_HW_3DES_BLK_SZ 8
+#define ICP_QAT_HW_NULL_BLK_SZ 8
+#define ICP_QAT_HW_AES_BLK_SZ 16
+#define ICP_QAT_HW_KASUMI_BLK_SZ 8
+#define ICP_QAT_HW_SNOW_3G_BLK_SZ 8
+#define ICP_QAT_HW_ZUC_3G_BLK_SZ 8
+#define ICP_QAT_HW_NULL_KEY_SZ 256
+#define ICP_QAT_HW_DES_KEY_SZ 8
+#define ICP_QAT_HW_3DES_KEY_SZ 24
+#define ICP_QAT_HW_AES_128_KEY_SZ 16
+#define ICP_QAT_HW_AES_192_KEY_SZ 24
+#define ICP_QAT_HW_AES_256_KEY_SZ 32
+#define ICP_QAT_HW_AES_128_F8_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
+ QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_192_F8_KEY_SZ (ICP_QAT_HW_AES_192_KEY_SZ * \
+ QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_256_F8_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
+ QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_128_XTS_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
+ QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_256_XTS_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
+ QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
+#define ICP_QAT_HW_KASUMI_KEY_SZ 16
+#define ICP_QAT_HW_KASUMI_F8_KEY_SZ (ICP_QAT_HW_KASUMI_KEY_SZ * \
+ QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_128_XTS_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
+ QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_256_XTS_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
+ QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
+#define ICP_QAT_HW_ARC4_KEY_SZ 256
+#define ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ 16
+#define ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ 16
+#define ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ 16
+#define ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ 16
+#define ICP_QAT_HW_MODE_F8_NUM_REG_TO_CLEAR 2
+
+#define ICP_QAT_HW_CIPHER_MAX_KEY_SZ ICP_QAT_HW_AES_256_F8_KEY_SZ
+
+struct icp_qat_hw_cipher_algo_blk {
+ struct icp_qat_hw_cipher_config cipher_config;
+ uint8_t key[ICP_QAT_HW_CIPHER_MAX_KEY_SZ];
+} __rte_cache_aligned;
+
+#endif
diff --git a/src/seastar/dpdk/drivers/crypto/qat/qat_adf/qat_algs.h b/src/seastar/dpdk/drivers/crypto/qat/qat_adf/qat_algs.h
new file mode 100644
index 00000000..5c63406b
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/qat/qat_adf/qat_algs.h
@@ -0,0 +1,174 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ * Copyright(c) 2015-2016 Intel Corporation.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Contact Information:
+ * qat-linux@intel.com
+ *
+ * BSD LICENSE
+ * Copyright(c) 2015-2016 Intel Corporation.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _ICP_QAT_ALGS_H_
+#define _ICP_QAT_ALGS_H_
+#include <rte_memory.h>
+#include <rte_crypto.h>
+#include "icp_qat_hw.h"
+#include "icp_qat_fw.h"
+#include "icp_qat_fw_la.h"
+
+/*
+ * Key Modifier (KM) value used in KASUMI algorithm in F9 mode to XOR
+ * Integrity Key (IK)
+ */
+#define KASUMI_F9_KEY_MODIFIER_4_BYTES 0xAAAAAAAA
+
+#define KASUMI_F8_KEY_MODIFIER_4_BYTES 0x55555555
+
+/* 3DES key sizes */
+#define QAT_3DES_KEY_SZ_OPT1 24 /* Keys are independent */
+#define QAT_3DES_KEY_SZ_OPT2 16 /* K3=K1 */
+
+#define QAT_AES_HW_CONFIG_CBC_ENC(alg) \
+ ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
+ ICP_QAT_HW_CIPHER_NO_CONVERT, \
+ ICP_QAT_HW_CIPHER_ENCRYPT)
+
+#define QAT_AES_HW_CONFIG_CBC_DEC(alg) \
+ ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
+ ICP_QAT_HW_CIPHER_KEY_CONVERT, \
+ ICP_QAT_HW_CIPHER_DECRYPT)
+
+struct qat_alg_buf {
+ uint32_t len;
+ uint32_t resrvd;
+ uint64_t addr;
+} __rte_packed;
+
+enum qat_crypto_proto_flag {
+ QAT_CRYPTO_PROTO_FLAG_NONE = 0,
+ QAT_CRYPTO_PROTO_FLAG_CCM = 1,
+ QAT_CRYPTO_PROTO_FLAG_GCM = 2,
+ QAT_CRYPTO_PROTO_FLAG_SNOW3G = 3,
+ QAT_CRYPTO_PROTO_FLAG_ZUC = 4
+};
+
+/*
+ * Maximum number of SGL entries
+ */
+#define QAT_SGL_MAX_NUMBER 16
+
+struct qat_alg_buf_list {
+ uint64_t resrvd;
+ uint32_t num_bufs;
+ uint32_t num_mapped_bufs;
+ struct qat_alg_buf bufers[QAT_SGL_MAX_NUMBER];
+} __rte_packed __rte_cache_aligned;
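+/*
+ * Illustrative sketch (editorial, hypothetical values): a source mbuf chain
+ * of two segments would be described to the hardware roughly as
+ *
+ *   struct qat_alg_buf_list sgl = {
+ *           .num_bufs = 2,
+ *           .bufers = {
+ *                   { .len = seg0_len, .addr = seg0_phys },
+ *                   { .len = seg1_len, .addr = seg1_phys },
+ *           },
+ *   };
+ *
+ * where seg*_len and seg*_phys stand for each segment's data length and
+ * physical address; up to QAT_SGL_MAX_NUMBER entries fit in one list.
+ */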
+
+struct qat_crypto_op_cookie {
+ struct qat_alg_buf_list qat_sgl_list_src;
+ struct qat_alg_buf_list qat_sgl_list_dst;
+ phys_addr_t qat_sgl_src_phys_addr;
+ phys_addr_t qat_sgl_dst_phys_addr;
+};
+
+/* Common content descriptor */
+struct qat_alg_cd {
+ struct icp_qat_hw_cipher_algo_blk cipher;
+ struct icp_qat_hw_auth_algo_blk hash;
+} __rte_packed __rte_cache_aligned;
+
+struct qat_session {
+ enum icp_qat_fw_la_cmd_id qat_cmd;
+ enum icp_qat_hw_cipher_algo qat_cipher_alg;
+ enum icp_qat_hw_cipher_dir qat_dir;
+ enum icp_qat_hw_cipher_mode qat_mode;
+ enum icp_qat_hw_auth_algo qat_hash_alg;
+ enum icp_qat_hw_auth_op auth_op;
+ void *bpi_ctx;
+ struct qat_alg_cd cd;
+ uint8_t *cd_cur_ptr;
+ phys_addr_t cd_paddr;
+ struct icp_qat_fw_la_bulk_req fw_req;
+ uint8_t aad_len;
+ struct qat_crypto_instance *inst;
+ rte_spinlock_t lock; /* protects this struct */
+};
+
+struct qat_alg_ablkcipher_cd {
+ struct icp_qat_hw_cipher_algo_blk *cd;
+ phys_addr_t cd_paddr;
+ struct icp_qat_fw_la_bulk_req fw_req;
+ struct qat_crypto_instance *inst;
+ rte_spinlock_t lock; /* protects this struct */
+};
+
+int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg);
+
+int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cd,
+ uint8_t *enckey,
+ uint32_t enckeylen);
+
+int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
+ uint8_t *authkey,
+ uint32_t authkeylen,
+ uint32_t add_auth_data_length,
+ uint32_t digestsize,
+ unsigned int operation);
+
+void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
+ enum qat_crypto_proto_flag proto_flags);
+
+void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_cd *cd,
+ int alg, const uint8_t *key,
+ unsigned int keylen);
+
+void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_cd *cd,
+ int alg, const uint8_t *key,
+ unsigned int keylen);
+
+int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+int qat_alg_validate_aes_docsisbpi_key(int key_len,
+ enum icp_qat_hw_cipher_algo *alg);
+int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+int qat_alg_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+int qat_alg_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+int qat_alg_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg);
+int qat_alg_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+#endif
diff --git a/src/seastar/dpdk/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c b/src/seastar/dpdk/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
new file mode 100644
index 00000000..154e1ddd
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
@@ -0,0 +1,1014 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ * Copyright(c) 2015-2016 Intel Corporation.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Contact Information:
+ * qat-linux@intel.com
+ *
+ * BSD LICENSE
+ * Copyright(c) 2015-2016 Intel Corporation.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_memcpy.h>
+#include <rte_common.h>
+#include <rte_spinlock.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_crypto_sym.h>
+
+#include "../qat_logs.h"
+
+#include <openssl/sha.h> /* Needed to calculate pre-compute values */
+#include <openssl/aes.h> /* Needed to calculate pre-compute values */
+#include <openssl/md5.h> /* Needed to calculate pre-compute values */
+
+#include "qat_algs.h"
+
+/* returns block size in bytes per cipher algo */
+int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg)
+{
+ switch (qat_cipher_alg) {
+ case ICP_QAT_HW_CIPHER_ALGO_DES:
+ return ICP_QAT_HW_DES_BLK_SZ;
+ case ICP_QAT_HW_CIPHER_ALGO_3DES:
+ return ICP_QAT_HW_3DES_BLK_SZ;
+ case ICP_QAT_HW_CIPHER_ALGO_AES128:
+ case ICP_QAT_HW_CIPHER_ALGO_AES192:
+ case ICP_QAT_HW_CIPHER_ALGO_AES256:
+ return ICP_QAT_HW_AES_BLK_SZ;
+ default:
+ PMD_DRV_LOG(ERR, "invalid block cipher alg %u", qat_cipher_alg);
+ return -EFAULT;
+ };
+ return -EFAULT;
+}
+
+/*
+ * Returns size in bytes per hash algo for state1 size field in cd_ctrl
+ * This is the digest size rounded up to the nearest quadword
+ */
+static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
+{
+ switch (qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SHA1:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_SHA224:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA224_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_SHA256:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_SHA384:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA384_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_SHA512:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_MD5:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_MD5_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
+ /* return maximum state1 size in this case */
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ default:
+ PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
+ return -EFAULT;
+ };
+ return -EFAULT;
+}
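+/*
+ * Worked example (editorial): assuming the quadword (8 byte) alignment the
+ * comment above refers to and the usual 20 byte SHA-1 state1 size, SHA-1
+ * rounds up to a 24 byte state1 field, while SHA-256 (32 bytes) is already
+ * a multiple of 8 and is returned unchanged.
+ */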
+
+/* returns digest size in bytes per hash algo */
+static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
+{
+ switch (qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SHA1:
+ return ICP_QAT_HW_SHA1_STATE1_SZ;
+ case ICP_QAT_HW_AUTH_ALGO_SHA224:
+ return ICP_QAT_HW_SHA224_STATE1_SZ;
+ case ICP_QAT_HW_AUTH_ALGO_SHA256:
+ return ICP_QAT_HW_SHA256_STATE1_SZ;
+ case ICP_QAT_HW_AUTH_ALGO_SHA384:
+ return ICP_QAT_HW_SHA384_STATE1_SZ;
+ case ICP_QAT_HW_AUTH_ALGO_SHA512:
+ return ICP_QAT_HW_SHA512_STATE1_SZ;
+ case ICP_QAT_HW_AUTH_ALGO_MD5:
+ return ICP_QAT_HW_MD5_STATE1_SZ;
+ case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
+ /* return maximum digest size in this case */
+ return ICP_QAT_HW_SHA512_STATE1_SZ;
+ default:
+ PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
+ return -EFAULT;
+ };
+ return -EFAULT;
+}
+
+/* returns block size in bytes per hash algo */
+static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
+{
+ switch (qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SHA1:
+ return SHA_CBLOCK;
+ case ICP_QAT_HW_AUTH_ALGO_SHA224:
+ return SHA256_CBLOCK;
+ case ICP_QAT_HW_AUTH_ALGO_SHA256:
+ return SHA256_CBLOCK;
+ case ICP_QAT_HW_AUTH_ALGO_SHA384:
+ return SHA512_CBLOCK;
+ case ICP_QAT_HW_AUTH_ALGO_SHA512:
+ return SHA512_CBLOCK;
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+ return 16;
+ case ICP_QAT_HW_AUTH_ALGO_MD5:
+ return MD5_CBLOCK;
+ case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
+ /* return maximum block size in this case */
+ return SHA512_CBLOCK;
+ default:
+ PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
+ return -EFAULT;
+ };
+ return -EFAULT;
+}
+
+static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA_CTX ctx;
+
+ if (!SHA1_Init(&ctx))
+ return -EFAULT;
+ SHA1_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
+ return 0;
+}
+
+static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA256_CTX ctx;
+
+ if (!SHA224_Init(&ctx))
+ return -EFAULT;
+ SHA256_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
+ return 0;
+}
+
+static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA256_CTX ctx;
+
+ if (!SHA256_Init(&ctx))
+ return -EFAULT;
+ SHA256_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
+ return 0;
+}
+
+static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA512_CTX ctx;
+
+ if (!SHA384_Init(&ctx))
+ return -EFAULT;
+ SHA512_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
+ return 0;
+}
+
+static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA512_CTX ctx;
+
+ if (!SHA512_Init(&ctx))
+ return -EFAULT;
+ SHA512_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
+ return 0;
+}
+
+static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
+{
+ MD5_CTX ctx;
+
+ if (!MD5_Init(&ctx))
+ return -EFAULT;
+ MD5_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH);
+
+ return 0;
+}
+
+static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
+ uint8_t *data_in,
+ uint8_t *data_out)
+{
+ int digest_size;
+ uint8_t digest[qat_hash_get_digest_size(
+ ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
+ uint32_t *hash_state_out_be32;
+ uint64_t *hash_state_out_be64;
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+ digest_size = qat_hash_get_digest_size(hash_alg);
+ if (digest_size <= 0)
+ return -EFAULT;
+
+ hash_state_out_be32 = (uint32_t *)data_out;
+ hash_state_out_be64 = (uint64_t *)data_out;
+
+ switch (hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SHA1:
+ if (partial_hash_sha1(data_in, digest))
+ return -EFAULT;
+ for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
+ *hash_state_out_be32 =
+ rte_bswap32(*(((uint32_t *)digest)+i));
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA224:
+ if (partial_hash_sha224(data_in, digest))
+ return -EFAULT;
+ for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
+ *hash_state_out_be32 =
+ rte_bswap32(*(((uint32_t *)digest)+i));
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA256:
+ if (partial_hash_sha256(data_in, digest))
+ return -EFAULT;
+ for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
+ *hash_state_out_be32 =
+ rte_bswap32(*(((uint32_t *)digest)+i));
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA384:
+ if (partial_hash_sha384(data_in, digest))
+ return -EFAULT;
+ for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
+ *hash_state_out_be64 =
+ rte_bswap64(*(((uint64_t *)digest)+i));
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA512:
+ if (partial_hash_sha512(data_in, digest))
+ return -EFAULT;
+ for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
+ *hash_state_out_be64 =
+ rte_bswap64(*(((uint64_t *)digest)+i));
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_MD5:
+ if (partial_hash_md5(data_in, data_out))
+ return -EFAULT;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid hash alg %u", hash_alg);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+#define HMAC_IPAD_VALUE 0x36
+#define HMAC_OPAD_VALUE 0x5c
+#define HASH_XCBC_PRECOMP_KEY_NUM 3
+
+static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
+ const uint8_t *auth_key,
+ uint16_t auth_keylen,
+ uint8_t *p_state_buf,
+ uint16_t *p_state_len)
+{
+ int block_size;
+ uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
+ uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+ if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
+ static uint8_t qat_aes_xcbc_key_seed[
+ ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+ };
+
+ uint8_t *in = NULL;
+ uint8_t *out = p_state_buf;
+ int x;
+ AES_KEY enc_key;
+
+ in = rte_zmalloc("working mem for key",
+ ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
+ rte_memcpy(in, qat_aes_xcbc_key_seed,
+ ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
+ for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
+ if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
+ &enc_key) != 0) {
+ rte_free(in -
+ (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
+ memset(out -
+ (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
+ 0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
+ return -EFAULT;
+ }
+ AES_encrypt(in, out, &enc_key);
+ in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
+ out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
+ }
+ *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
+ rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
+ return 0;
+ } else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
+ (hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
+ uint8_t *in = NULL;
+ uint8_t *out = p_state_buf;
+ AES_KEY enc_key;
+
+ memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
+ ICP_QAT_HW_GALOIS_LEN_A_SZ +
+ ICP_QAT_HW_GALOIS_E_CTR0_SZ);
+ in = rte_zmalloc("working mem for key",
+ ICP_QAT_HW_GALOIS_H_SZ, 16);
+ memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
+ if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
+ &enc_key) != 0) {
+ return -EFAULT;
+ }
+ AES_encrypt(in, out, &enc_key);
+ *p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
+ ICP_QAT_HW_GALOIS_LEN_A_SZ +
+ ICP_QAT_HW_GALOIS_E_CTR0_SZ;
+ rte_free(in);
+ return 0;
+ }
+
+ block_size = qat_hash_get_block_size(hash_alg);
+ if (block_size <= 0)
+ return -EFAULT;
+ /* init ipad and opad from key and xor with fixed values */
+ memset(ipad, 0, block_size);
+ memset(opad, 0, block_size);
+
+ if (auth_keylen > (unsigned int)block_size) {
+ PMD_DRV_LOG(ERR, "invalid keylen %u", auth_keylen);
+ return -EFAULT;
+ }
+ rte_memcpy(ipad, auth_key, auth_keylen);
+ rte_memcpy(opad, auth_key, auth_keylen);
+
+ for (i = 0; i < block_size; i++) {
+ uint8_t *ipad_ptr = ipad + i;
+ uint8_t *opad_ptr = opad + i;
+ *ipad_ptr ^= HMAC_IPAD_VALUE;
+ *opad_ptr ^= HMAC_OPAD_VALUE;
+ }
+
+ /* do partial hash of ipad and copy to state1 */
+ if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
+ memset(ipad, 0, block_size);
+ memset(opad, 0, block_size);
+ PMD_DRV_LOG(ERR, "ipad precompute failed");
+ return -EFAULT;
+ }
+
+ /*
+ * State len is a multiple of 8, so may be larger than the digest.
+ * Put the partial hash of opad state_len bytes after state1
+ */
+ *p_state_len = qat_hash_get_state1_size(hash_alg);
+ if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
+ memset(ipad, 0, block_size);
+ memset(opad, 0, block_size);
+ PMD_DRV_LOG(ERR, "opad precompute failed");
+ return -EFAULT;
+ }
+
+ /* don't leave data lying around */
+ memset(ipad, 0, block_size);
+ memset(opad, 0, block_size);
+ return 0;
+}
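+/*
+ * Background note (editorial): the ipad/opad precompute above relies on the
+ * standard HMAC construction
+ *
+ *   HMAC(K, M) = H((K ^ opad) || H((K ^ ipad) || M))
+ *
+ * Since K ^ ipad and K ^ opad each occupy exactly one compression-function
+ * block, their intermediate hash states can be computed once per session
+ * with the partial_hash_* helpers and stored in the content descriptor;
+ * the hardware then resumes from those midstates for every request instead
+ * of re-hashing the padded key.
+ */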
+
+void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
+ enum qat_crypto_proto_flag proto_flags)
+{
+ PMD_INIT_FUNC_TRACE();
+ header->hdr_flags =
+ ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+ header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
+ header->comn_req_flags =
+ ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
+ QAT_COMN_PTR_TYPE_FLAT);
+ ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_PARTIAL_NONE);
+ ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
+ ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
+
+ switch (proto_flags) {
+ case QAT_CRYPTO_PROTO_FLAG_NONE:
+ ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_PROTO);
+ break;
+ case QAT_CRYPTO_PROTO_FLAG_CCM:
+ ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_CCM_PROTO);
+ break;
+ case QAT_CRYPTO_PROTO_FLAG_GCM:
+ ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_GCM_PROTO);
+ break;
+ case QAT_CRYPTO_PROTO_FLAG_SNOW3G:
+ ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_SNOW_3G_PROTO);
+ break;
+ case QAT_CRYPTO_PROTO_FLAG_ZUC:
+ ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_ZUC_3G_PROTO);
+ break;
+ }
+
+ ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_UPDATE_STATE);
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
+}
+
+/*
+ * SNOW 3G and ZUC must never use this function; they set their protocol
+ * flag directly in both the cipher and auth parts of the content
+ * descriptor building functions.
+ */
+static enum qat_crypto_proto_flag
+qat_get_crypto_proto_flag(uint16_t flags)
+{
+ int proto = ICP_QAT_FW_LA_PROTO_GET(flags);
+ enum qat_crypto_proto_flag qat_proto_flag =
+ QAT_CRYPTO_PROTO_FLAG_NONE;
+
+ switch (proto) {
+ case ICP_QAT_FW_LA_GCM_PROTO:
+ qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
+ break;
+ case ICP_QAT_FW_LA_CCM_PROTO:
+ qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
+ break;
+ }
+
+ return qat_proto_flag;
+}
+
+int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
+ uint8_t *cipherkey,
+ uint32_t cipherkeylen)
+{
+ struct icp_qat_hw_cipher_algo_blk *cipher;
+ struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
+ struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+ void *ptr = &req_tmpl->cd_ctrl;
+ struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
+ struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
+ enum icp_qat_hw_cipher_convert key_convert;
+ enum qat_crypto_proto_flag qat_proto_flag =
+ QAT_CRYPTO_PROTO_FLAG_NONE;
+ uint32_t total_key_size;
+ uint16_t cipher_offset, cd_size;
+ uint32_t wordIndex = 0;
+ uint32_t *temp_key = NULL;
+ PMD_INIT_FUNC_TRACE();
+
+ if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+ cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
+ ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
+ ICP_QAT_FW_SLICE_CIPHER);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
+ ICP_QAT_FW_SLICE_DRAM_WR);
+ ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_RET_AUTH_RES);
+ ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
+ cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
+ } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
+ cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
+ ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
+ ICP_QAT_FW_SLICE_CIPHER);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
+ ICP_QAT_FW_SLICE_AUTH);
+ ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
+ ICP_QAT_FW_SLICE_AUTH);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
+ ICP_QAT_FW_SLICE_DRAM_WR);
+ cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
+ } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
+ PMD_DRV_LOG(ERR, "Invalid param, must be a cipher command.");
+ return -EFAULT;
+ }
+
+ if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
+ /*
+ * CTR streaming ciphers are a special case: decrypt == encrypt.
+ * Override the direction and key-convert defaults set earlier.
+ */
+ cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
+ key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
+ } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2
+ || cdesc->qat_cipher_alg ==
+ ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)
+ key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
+ else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
+ key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
+ else
+ key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
+
+ if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+ total_key_size = ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
+ ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
+ cipher_cd_ctrl->cipher_state_sz =
+ ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
+ qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
+
+ } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
+ total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ;
+ cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_KASUMI_BLK_SZ >> 3;
+ cipher_cd_ctrl->cipher_padding_sz =
+ (2 * ICP_QAT_HW_KASUMI_BLK_SZ) >> 3;
+ } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) {
+ total_key_size = ICP_QAT_HW_3DES_KEY_SZ;
+ cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_3DES_BLK_SZ >> 3;
+ qat_proto_flag =
+ qat_get_crypto_proto_flag(header->serv_specif_flags);
+ } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_DES) {
+ total_key_size = ICP_QAT_HW_DES_KEY_SZ;
+ cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_DES_BLK_SZ >> 3;
+ qat_proto_flag =
+ qat_get_crypto_proto_flag(header->serv_specif_flags);
+ } else if (cdesc->qat_cipher_alg ==
+ ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+ total_key_size = ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ +
+ ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
+ cipher_cd_ctrl->cipher_state_sz =
+ ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
+ qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
+ } else {
+ total_key_size = cipherkeylen;
+ cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
+ qat_proto_flag =
+ qat_get_crypto_proto_flag(header->serv_specif_flags);
+ }
+ cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;
+ cipher_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
+ cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
+
+ header->service_cmd_id = cdesc->qat_cmd;
+ qat_alg_init_common_hdr(header, qat_proto_flag);
+
+ cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;
+ cipher->cipher_config.val =
+ ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
+ cdesc->qat_cipher_alg, key_convert,
+ cdesc->qat_dir);
+
+ if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
+ temp_key = (uint32_t *)(cdesc->cd_cur_ptr +
+ sizeof(struct icp_qat_hw_cipher_config)
+ + cipherkeylen);
+ memcpy(cipher->key, cipherkey, cipherkeylen);
+ memcpy(temp_key, cipherkey, cipherkeylen);
+
+ /* XOR Key with KASUMI F8 key modifier at 4 bytes level */
+ for (wordIndex = 0; wordIndex < (cipherkeylen >> 2);
+ wordIndex++)
+ temp_key[wordIndex] ^= KASUMI_F8_KEY_MODIFIER_4_BYTES;
+
+ cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
+ cipherkeylen + cipherkeylen;
+ } else {
+ memcpy(cipher->key, cipherkey, cipherkeylen);
+ cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
+ cipherkeylen;
+ }
+
+ if (total_key_size > cipherkeylen) {
+ uint32_t padding_size = total_key_size-cipherkeylen;
+ if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
+ && (cipherkeylen == QAT_3DES_KEY_SZ_OPT2))
+ /* K3 not provided, so reuse K1 as K3 */
+ memcpy(cdesc->cd_cur_ptr, cipherkey, padding_size);
+ else
+ memset(cdesc->cd_cur_ptr, 0, padding_size);
+ cdesc->cd_cur_ptr += padding_size;
+ }
+ cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
+ cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
+
+ return 0;
+}
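+/*
+ * Editorial note: the offsets and sizes written into cd_ctrl above are
+ * expressed in 8 byte quadwords, hence the ">> 3" shifts.  For example,
+ * a plain AES-128 CBC session places the cipher config block at the start
+ * of the descriptor, so cipher_cfg_offset is 0 and cipher_key_sz is
+ * 16 >> 3 = 2 quadwords.
+ */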
+
+int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
+ uint8_t *authkey,
+ uint32_t authkeylen,
+ uint32_t add_auth_data_length,
+ uint32_t digestsize,
+ unsigned int operation)
+{
+ struct icp_qat_hw_auth_setup *hash;
+ struct icp_qat_hw_cipher_algo_blk *cipherconfig;
+ struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
+ struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+ void *ptr = &req_tmpl->cd_ctrl;
+ struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
+ struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
+ struct icp_qat_fw_la_auth_req_params *auth_param =
+ (struct icp_qat_fw_la_auth_req_params *)
+ ((char *)&req_tmpl->serv_specif_rqpars +
+ sizeof(struct icp_qat_fw_la_cipher_req_params));
+ uint16_t state1_size = 0, state2_size = 0;
+ uint16_t hash_offset, cd_size;
+ uint32_t *aad_len = NULL;
+ uint32_t wordIndex = 0;
+ uint32_t *pTempKey;
+ enum qat_crypto_proto_flag qat_proto_flag =
+ QAT_CRYPTO_PROTO_FLAG_NONE;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
+ ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
+ ICP_QAT_FW_SLICE_AUTH);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
+ ICP_QAT_FW_SLICE_DRAM_WR);
+ cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
+ } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
+ ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
+ ICP_QAT_FW_SLICE_AUTH);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
+ ICP_QAT_FW_SLICE_CIPHER);
+ ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
+ ICP_QAT_FW_SLICE_CIPHER);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
+ ICP_QAT_FW_SLICE_DRAM_WR);
+ cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
+ } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
+ PMD_DRV_LOG(ERR, "Invalid param, must be a hash command.");
+ return -EFAULT;
+ }
+
+ if (operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
+ ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_RET_AUTH_RES);
+ ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_CMP_AUTH_RES);
+ cdesc->auth_op = ICP_QAT_HW_AUTH_VERIFY;
+ } else {
+ ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_RET_AUTH_RES);
+ ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
+ cdesc->auth_op = ICP_QAT_HW_AUTH_GENERATE;
+ }
+
+ /*
+ * Setup the inner hash config
+ */
+ hash_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
+ hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr;
+ hash->auth_config.reserved = 0;
+ hash->auth_config.config =
+ ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
+ cdesc->qat_hash_alg, digestsize);
+
+ if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
+ || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9
+ || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3)
+ hash->auth_counter.counter = 0;
+ else
+ hash->auth_counter.counter = rte_bswap32(
+ qat_hash_get_block_size(cdesc->qat_hash_alg));
+
+ cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup);
+
+ /*
+ * cd_cur_ptr now points at the state1 information.
+ */
+ switch (cdesc->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SHA1:
+ if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1,
+ authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+ PMD_DRV_LOG(ERR, "(SHA)precompute failed");
+ return -EFAULT;
+ }
+ state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA224:
+ if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224,
+ authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+ PMD_DRV_LOG(ERR, "(SHA)precompute failed");
+ return -EFAULT;
+ }
+ state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA256:
+ if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256,
+ authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+ PMD_DRV_LOG(ERR, "(SHA)precompute failed");
+ return -EFAULT;
+ }
+ state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA384:
+ if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384,
+ authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+ PMD_DRV_LOG(ERR, "(SHA)precompute failed");
+ return -EFAULT;
+ }
+ state2_size = ICP_QAT_HW_SHA384_STATE2_SZ;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA512:
+ if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512,
+ authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+ PMD_DRV_LOG(ERR, "(SHA)precompute failed");
+ return -EFAULT;
+ }
+ state2_size = ICP_QAT_HW_SHA512_STATE2_SZ;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
+ state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
+ if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
+ authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
+ &state2_size)) {
+ PMD_DRV_LOG(ERR, "(XCBC)precompute failed");
+ return -EFAULT;
+ }
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+ qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
+ state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
+ if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
+ authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
+ &state2_size)) {
+ PMD_DRV_LOG(ERR, "(GCM)precompute failed");
+ return -EFAULT;
+ }
+ /*
+ * Write the AAD length into bytes 16-19 of state2 in big-endian
+ * format; the LEN_A field it is written into is 8 bytes wide.
+ */
+ auth_param->u2.aad_sz =
+ RTE_ALIGN_CEIL(add_auth_data_length, 16);
+ auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
+
+ aad_len = (uint32_t *)(cdesc->cd_cur_ptr +
+ ICP_QAT_HW_GALOIS_128_STATE1_SZ +
+ ICP_QAT_HW_GALOIS_H_SZ);
+ *aad_len = rte_bswap32(add_auth_data_length);
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+ qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
+ state1_size = qat_hash_get_state1_size(
+ ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
+ state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
+ memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
+
+ cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
+ (cdesc->cd_cur_ptr + state1_size + state2_size);
+ cipherconfig->cipher_config.val =
+ ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
+ ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
+ ICP_QAT_HW_CIPHER_KEY_CONVERT,
+ ICP_QAT_HW_CIPHER_ENCRYPT);
+ memcpy(cipherconfig->key, authkey, authkeylen);
+ memset(cipherconfig->key + authkeylen,
+ 0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
+ cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
+ authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
+ auth_param->hash_state_sz =
+ RTE_ALIGN_CEIL(add_auth_data_length, 16) >> 3;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+ hash->auth_config.config =
+ ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE0,
+ cdesc->qat_hash_alg, digestsize);
+ qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
+ state1_size = qat_hash_get_state1_size(
+ ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3);
+ state2_size = ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ;
+ memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size
+ + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ);
+
+ memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
+ cdesc->cd_cur_ptr += state1_size + state2_size
+ + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
+ auth_param->hash_state_sz =
+ RTE_ALIGN_CEIL(add_auth_data_length, 16) >> 3;
+
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_MD5:
+ if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5,
+ authkey, authkeylen, cdesc->cd_cur_ptr,
+ &state1_size)) {
+ PMD_DRV_LOG(ERR, "(MD5)precompute failed");
+ return -EFAULT;
+ }
+ state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_NULL:
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+ state1_size = qat_hash_get_state1_size(
+ ICP_QAT_HW_AUTH_ALGO_KASUMI_F9);
+ state2_size = ICP_QAT_HW_KASUMI_F9_STATE2_SZ;
+ memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
+ pTempKey = (uint32_t *)(cdesc->cd_cur_ptr + state1_size
+ + authkeylen);
+ /*
+ * The Inner Hash Initial State2 block must contain IK
+ * (Initialisation Key), followed by IK XOR-ed with KM
+ * (Key Modifier): IK||(IK^KM).
+ */
+ /* write the auth key */
+ memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
+ /* initialise temp key with auth key */
+ memcpy(pTempKey, authkey, authkeylen);
+ /* XOR Key with KASUMI F9 key modifier at 4 bytes level */
+ for (wordIndex = 0; wordIndex < (authkeylen >> 2); wordIndex++)
+ pTempKey[wordIndex] ^= KASUMI_F9_KEY_MODIFIER_4_BYTES;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
+ return -EFAULT;
+ }
+
+ /* Request template setup */
+ qat_alg_init_common_hdr(header, qat_proto_flag);
+ header->service_cmd_id = cdesc->qat_cmd;
+
+ /* Auth CD config setup */
+ hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
+ hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
+ hash_cd_ctrl->inner_res_sz = digestsize;
+ hash_cd_ctrl->final_sz = digestsize;
+ hash_cd_ctrl->inner_state1_sz = state1_size;
+ auth_param->auth_res_sz = digestsize;
+
+ hash_cd_ctrl->inner_state2_sz = state2_size;
+ hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
+ ((sizeof(struct icp_qat_hw_auth_setup) +
+ RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
+ >> 3);
+
+ cdesc->cd_cur_ptr += state1_size + state2_size;
+ cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
+
+ cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
+ cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
+
+ return 0;
+}
+
+int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+{
+ switch (key_len) {
+ case ICP_QAT_HW_AES_128_KEY_SZ:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
+ break;
+ case ICP_QAT_HW_AES_192_KEY_SZ:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
+ break;
+ case ICP_QAT_HW_AES_256_KEY_SZ:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int qat_alg_validate_aes_docsisbpi_key(int key_len,
+ enum icp_qat_hw_cipher_algo *alg)
+{
+ switch (key_len) {
+ case ICP_QAT_HW_AES_128_KEY_SZ:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+{
+ switch (key_len) {
+ case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int qat_alg_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+{
+ switch (key_len) {
+ case ICP_QAT_HW_KASUMI_KEY_SZ:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_KASUMI;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int qat_alg_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+{
+ switch (key_len) {
+ case ICP_QAT_HW_DES_KEY_SZ:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_DES;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int qat_alg_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+{
+ switch (key_len) {
+ case QAT_3DES_KEY_SZ_OPT1:
+ case QAT_3DES_KEY_SZ_OPT2:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_3DES;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int qat_alg_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+{
+ switch (key_len) {
+ case ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
diff --git a/src/seastar/dpdk/drivers/crypto/qat/qat_crypto.c b/src/seastar/dpdk/drivers/crypto/qat/qat_crypto.c
new file mode 100644
index 00000000..386aa453
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/qat/qat_crypto.c
@@ -0,0 +1,1293 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <string.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <sys/queue.h>
+#include <stdarg.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_ether.h>
+#include <rte_malloc.h>
+#include <rte_launch.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_string_fns.h>
+#include <rte_spinlock.h>
+#include <rte_hexdump.h>
+#include <rte_crypto_sym.h>
+#include <openssl/evp.h>
+
+#include "qat_logs.h"
+#include "qat_algs.h"
+#include "qat_crypto.h"
+#include "adf_transport_access_macros.h"
+
+#define BYTE_LENGTH 8
+
+static int
+qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
+ struct qat_pmd_private *internals) {
+ int i = 0;
+ const struct rte_cryptodev_capabilities *capability;
+
+ while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
+ RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+ if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
+ continue;
+
+ if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
+ continue;
+
+ if (capability->sym.cipher.algo == algo)
+ return 1;
+ }
+ return 0;
+}
+
+static int
+qat_is_auth_alg_supported(enum rte_crypto_auth_algorithm algo,
+ struct qat_pmd_private *internals) {
+ int i = 0;
+ const struct rte_cryptodev_capabilities *capability;
+
+ while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
+ RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+ if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
+ continue;
+
+ if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
+ continue;
+
+ if (capability->sym.auth.algo == algo)
+ return 1;
+ }
+ return 0;
+}
+
+/** Encrypt a single partial block
+ * Depends on openssl libcrypto
+ * Uses ECB+XOR to do CFB encryption, same result, better performance
+ */
+static inline int
+bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
+ uint8_t *iv, int ivlen, int srclen,
+ void *bpi_ctx)
+{
+ EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
+ int encrypted_ivlen;
+ uint8_t encrypted_iv[16];
+ int i;
+
+ /* ECB method: encrypt the IV, then XOR this with plaintext */
+ if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
+ <= 0)
+ goto cipher_encrypt_err;
+
+ for (i = 0; i < srclen; i++)
+ *(dst+i) = *(src+i)^(encrypted_iv[i]);
+
+ return 0;
+
+cipher_encrypt_err:
+ PMD_DRV_LOG(ERR, "libcrypto ECB cipher encrypt failed");
+ return -EINVAL;
+}
+
+/** Decrypt a single partial block
+ * Depends on openssl libcrypto
+ * Uses ECB+XOR to do CFB decryption (the IV is still ECB-encrypted),
+ * same result, better performance
+ */
+static inline int
+bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
+ uint8_t *iv, int ivlen, int srclen,
+ void *bpi_ctx)
+{
+ EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
+ int encrypted_ivlen;
+ uint8_t encrypted_iv[16];
+ int i;
+
+ /* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */
+ if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
+ <= 0)
+ goto cipher_decrypt_err;
+
+ for (i = 0; i < srclen; i++)
+ *(dst+i) = *(src+i)^(encrypted_iv[i]);
+
+ return 0;
+
+cipher_decrypt_err:
+ PMD_DRV_LOG(ERR, "libcrypto ECB cipher encrypt for BPI IV failed");
+ return -EINVAL;
+}
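+/*
+ * Editorial note: for a single block CFB reduces to
+ *
+ *   C = P ^ E_K(IV)   and   P = C ^ E_K(IV)
+ *
+ * i.e. both directions only ever *encrypt* the IV and XOR the result with
+ * the data, which is why the two helpers above issue the same ECB
+ * EVP_EncryptUpdate() call whether they are encrypting or decrypting.
+ */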
+
+/** Creates an EVP cipher context for either AES or DES in ECB mode
+ * Depends on openssl libcrypto
+ */
+static void *
+bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
+ enum rte_crypto_cipher_operation direction __rte_unused,
+ uint8_t *key)
+{
+ const EVP_CIPHER *algo = NULL;
+ EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();
+
+ if (ctx == NULL)
+ goto ctx_init_err;
+
+ if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI)
+ algo = EVP_des_ecb();
+ else
+ algo = EVP_aes_128_ecb();
+
+ /* IV will be ECB encrypted whether direction is encrypt or decrypt*/
+ if (EVP_EncryptInit_ex(ctx, algo, NULL, key, 0) != 1)
+ goto ctx_init_err;
+
+ return ctx;
+
+ctx_init_err:
+ if (ctx != NULL)
+ EVP_CIPHER_CTX_free(ctx);
+ return NULL;
+}
+
+/** Frees a context previously created
+ * Depends on openssl libcrypto
+ */
+static void
+bpi_cipher_ctx_free(void *bpi_ctx)
+{
+ if (bpi_ctx != NULL)
+ EVP_CIPHER_CTX_free((EVP_CIPHER_CTX *)bpi_ctx);
+}
+
+static inline uint32_t
+adf_modulo(uint32_t data, uint32_t shift);
+
+static inline int
+qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
+ struct qat_crypto_op_cookie *qat_op_cookie);
+
+void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
+ void *session)
+{
+ struct qat_session *sess = session;
+ phys_addr_t cd_paddr;
+
+ PMD_INIT_FUNC_TRACE();
+ if (sess) {
+ if (sess->bpi_ctx) {
+ bpi_cipher_ctx_free(sess->bpi_ctx);
+ sess->bpi_ctx = NULL;
+ }
+ cd_paddr = sess->cd_paddr;
+ memset(sess, 0, qat_crypto_sym_get_session_private_size(dev));
+ sess->cd_paddr = cd_paddr;
+ } else
+ PMD_DRV_LOG(ERR, "NULL session");
+}
+
+static int
+qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
+{
+ /* Cipher Only */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
+ return ICP_QAT_FW_LA_CMD_CIPHER;
+
+ /* Authentication Only */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
+ return ICP_QAT_FW_LA_CMD_AUTH;
+
+ if (xform->next == NULL)
+ return -1;
+
+ /* Cipher then Authenticate */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+
+ /* Authenticate then Cipher */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+
+ return -1;
+}
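+/*
+ * Illustrative example (editorial): a chain such as
+ *
+ *   cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ *   cipher_xform.next = &auth_xform;   (auth_xform.type is ..._AUTH)
+ *
+ * maps to ICP_QAT_FW_LA_CMD_CIPHER_HASH, while the reverse ordering of the
+ * two xforms maps to ICP_QAT_FW_LA_CMD_HASH_CIPHER.
+ */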
+
+static struct rte_crypto_auth_xform *
+qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
+{
+ do {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return &xform->auth;
+
+ xform = xform->next;
+ } while (xform);
+
+ return NULL;
+}
+
+static struct rte_crypto_cipher_xform *
+qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
+{
+ do {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return &xform->cipher;
+
+ xform = xform->next;
+ } while (xform);
+
+ return NULL;
+}
+void *
+qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform, void *session_private)
+{
+ struct qat_session *session = session_private;
+ struct qat_pmd_private *internals = dev->data->dev_private;
+ struct rte_crypto_cipher_xform *cipher_xform = NULL;
+
+ /* Get cipher xform from crypto xform chain */
+ cipher_xform = qat_get_cipher_xform(xform);
+
+ switch (cipher_xform->algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ if (qat_alg_validate_aes_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_GCM:
+ if (qat_alg_validate_aes_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ if (qat_alg_validate_aes_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+ if (qat_alg_validate_snow3g_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid SNOW 3G cipher key size");
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_NULL:
+ session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_KASUMI_F8:
+ if (qat_alg_validate_kasumi_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid KASUMI cipher key size");
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ if (qat_alg_validate_3des_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size");
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_DES_CBC:
+ if (qat_alg_validate_des_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid DES cipher key size");
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CTR:
+ if (qat_alg_validate_3des_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size");
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
+ session->bpi_ctx = bpi_cipher_ctx_init(
+ cipher_xform->algo,
+ cipher_xform->op,
+ cipher_xform->key.data);
+ if (session->bpi_ctx == NULL) {
+ PMD_DRV_LOG(ERR, "failed to create DES BPI ctx");
+ goto error_out;
+ }
+ if (qat_alg_validate_des_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid DES cipher key size");
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
+ session->bpi_ctx = bpi_cipher_ctx_init(
+ cipher_xform->algo,
+ cipher_xform->op,
+ cipher_xform->key.data);
+ if (session->bpi_ctx == NULL) {
+ PMD_DRV_LOG(ERR, "failed to create AES BPI ctx");
+ goto error_out;
+ }
+ if (qat_alg_validate_aes_docsisbpi_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid AES DOCSISBPI key size");
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_ZUC_EEA3:
+ if (!qat_is_cipher_alg_supported(
+ cipher_xform->algo, internals)) {
+ PMD_DRV_LOG(ERR, "%s not supported on this device",
+ rte_crypto_cipher_algorithm_strings
+ [cipher_xform->algo]);
+ goto error_out;
+ }
+ if (qat_alg_validate_zuc_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid ZUC cipher key size");
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_ECB:
+ case RTE_CRYPTO_CIPHER_AES_ECB:
+ case RTE_CRYPTO_CIPHER_AES_CCM:
+ case RTE_CRYPTO_CIPHER_AES_F8:
+ case RTE_CRYPTO_CIPHER_AES_XTS:
+ case RTE_CRYPTO_CIPHER_ARC4:
+ PMD_DRV_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
+ cipher_xform->algo);
+ goto error_out;
+ default:
+ PMD_DRV_LOG(ERR, "Crypto: Undefined Cipher specified %u\n",
+ cipher_xform->algo);
+ goto error_out;
+ }
+
+ if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
+ else
+ session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
+
+ if (qat_alg_aead_session_create_content_desc_cipher(session,
+ cipher_xform->key.data,
+ cipher_xform->key.length))
+ goto error_out;
+
+ return session;
+
+error_out:
+ if (session->bpi_ctx) {
+ bpi_cipher_ctx_free(session->bpi_ctx);
+ session->bpi_ctx = NULL;
+ }
+ return NULL;
+}
+
+
+void *
+qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform, void *session_private)
+{
+ struct qat_session *session = session_private;
+
+ int qat_cmd_id;
+ PMD_INIT_FUNC_TRACE();
+
+ /* Get requested QAT command id */
+ qat_cmd_id = qat_get_cmd_id(xform);
+ if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
+ PMD_DRV_LOG(ERR, "Unsupported xform chain requested");
+ goto error_out;
+ }
+ session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
+ switch (session->qat_cmd) {
+ case ICP_QAT_FW_LA_CMD_CIPHER:
+ session = qat_crypto_sym_configure_session_cipher(dev, xform, session);
+ break;
+ case ICP_QAT_FW_LA_CMD_AUTH:
+ session = qat_crypto_sym_configure_session_auth(dev, xform, session);
+ break;
+ case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
+ session = qat_crypto_sym_configure_session_cipher(dev, xform, session);
+ session = qat_crypto_sym_configure_session_auth(dev, xform, session);
+ break;
+ case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
+ session = qat_crypto_sym_configure_session_auth(dev, xform, session);
+ session = qat_crypto_sym_configure_session_cipher(dev, xform, session);
+ break;
+ case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
+ case ICP_QAT_FW_LA_CMD_TRNG_TEST:
+ case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
+ case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
+ case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
+ case ICP_QAT_FW_LA_CMD_MGF1:
+ case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
+ case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
+ case ICP_QAT_FW_LA_CMD_DELIMITER:
+ PMD_DRV_LOG(ERR, "Unsupported Service %u",
+ session->qat_cmd);
+ goto error_out;
+ default:
+ PMD_DRV_LOG(ERR, "Unsupported Service %u",
+ session->qat_cmd);
+ goto error_out;
+ }
+
+ return session;
+
+error_out:
+ return NULL;
+}
+
+struct qat_session *
+qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct qat_session *session_private)
+{
+
+ struct qat_session *session = session_private;
+ struct rte_crypto_auth_xform *auth_xform = NULL;
+ struct rte_crypto_cipher_xform *cipher_xform = NULL;
+ struct qat_pmd_private *internals = dev->data->dev_private;
+ auth_xform = qat_get_auth_xform(xform);
+
+ switch (auth_xform->algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
+ break;
+ case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
+ break;
+ case RTE_CRYPTO_AUTH_AES_GCM:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
+ break;
+ case RTE_CRYPTO_AUTH_AES_GMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
+ break;
+ case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
+ break;
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5;
+ break;
+ case RTE_CRYPTO_AUTH_NULL:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
+ break;
+ case RTE_CRYPTO_AUTH_KASUMI_F9:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
+ break;
+ case RTE_CRYPTO_AUTH_ZUC_EIA3:
+ if (!qat_is_auth_alg_supported(auth_xform->algo, internals)) {
+ PMD_DRV_LOG(ERR, "%s not supported on this device",
+ rte_crypto_auth_algorithm_strings
+ [auth_xform->algo]);
+ goto error_out;
+ }
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
+ break;
+ case RTE_CRYPTO_AUTH_SHA1:
+ case RTE_CRYPTO_AUTH_SHA256:
+ case RTE_CRYPTO_AUTH_SHA512:
+ case RTE_CRYPTO_AUTH_SHA224:
+ case RTE_CRYPTO_AUTH_SHA384:
+ case RTE_CRYPTO_AUTH_MD5:
+ case RTE_CRYPTO_AUTH_AES_CCM:
+ case RTE_CRYPTO_AUTH_AES_CMAC:
+ case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+ PMD_DRV_LOG(ERR, "Crypto: Unsupported hash alg %u",
+ auth_xform->algo);
+ goto error_out;
+ default:
+ PMD_DRV_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
+ auth_xform->algo);
+ goto error_out;
+ }
+ cipher_xform = qat_get_cipher_xform(xform);
+
+ if ((session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
+ (session->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
+ if (qat_alg_aead_session_create_content_desc_auth(session,
+ cipher_xform->key.data,
+ cipher_xform->key.length,
+ auth_xform->add_auth_data_length,
+ auth_xform->digest_length,
+ auth_xform->op))
+ goto error_out;
+ } else {
+ if (qat_alg_aead_session_create_content_desc_auth(session,
+ auth_xform->key.data,
+ auth_xform->key.length,
+ auth_xform->add_auth_data_length,
+ auth_xform->digest_length,
+ auth_xform->op))
+ goto error_out;
+ }
+ return session;
+
+error_out:
+ return NULL;
+}
+
+unsigned qat_crypto_sym_get_session_private_size(
+ struct rte_cryptodev *dev __rte_unused)
+{
+ return RTE_ALIGN_CEIL(sizeof(struct qat_session), 8);
+}
+
+static inline uint32_t
+qat_bpicipher_preprocess(struct qat_session *ctx,
+ struct rte_crypto_op *op)
+{
+ uint8_t block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ uint8_t last_block_len = sym_op->cipher.data.length % block_len;
+
+ if (last_block_len &&
+ ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+
+ /* Decrypt last block */
+ uint8_t *last_block, *dst, *iv;
+ uint32_t last_block_offset = sym_op->cipher.data.offset +
+ sym_op->cipher.data.length - last_block_len;
+ last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
+ uint8_t *, last_block_offset);
+
+ if (unlikely(sym_op->m_dst != NULL))
+ /* out-of-place operation (OOP) */
+ dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
+ uint8_t *, last_block_offset);
+ else
+ dst = last_block;
+
+ if (last_block_len < sym_op->cipher.data.length)
+ /* use previous block ciphertext as IV */
+ iv = last_block - block_len;
+ else
+ /* runt block, i.e. less than one full block */
+ iv = sym_op->cipher.iv.data;
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
+ rte_hexdump(stdout, "BPI: src before pre-process:", last_block,
+ last_block_len);
+ if (sym_op->m_dst != NULL)
+ rte_hexdump(stdout, "BPI: dst before pre-process:", dst,
+ last_block_len);
+#endif
+ bpi_cipher_decrypt(last_block, dst, iv, block_len,
+ last_block_len, ctx->bpi_ctx);
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
+ rte_hexdump(stdout, "BPI: src after pre-process:", last_block,
+ last_block_len);
+ if (sym_op->m_dst != NULL)
+ rte_hexdump(stdout, "BPI: dst after pre-process:", dst,
+ last_block_len);
+#endif
+ }
+
+ return sym_op->cipher.data.length - last_block_len;
+}
+
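+/*
+ * DOCSIS BPI encrypt post-processing: once the device has returned the
+ * response, encrypt any trailing partial block in software and return
+ * the length of the full-block region handled by hardware.
+ */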
+static inline uint32_t
+qat_bpicipher_postprocess(struct qat_session *ctx,
+ struct rte_crypto_op *op)
+{
+ uint8_t block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ uint8_t last_block_len = sym_op->cipher.data.length % block_len;
+
+ if (last_block_len > 0 &&
+ ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
+
+ /* Encrypt last block */
+ uint8_t *last_block, *dst, *iv;
+ uint32_t last_block_offset;
+
+ last_block_offset = sym_op->cipher.data.offset +
+ sym_op->cipher.data.length - last_block_len;
+ last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
+ uint8_t *, last_block_offset);
+
+ if (unlikely(sym_op->m_dst != NULL))
+ /* out-of-place operation (OOP) */
+ dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
+ uint8_t *, last_block_offset);
+ else
+ dst = last_block;
+
+ if (last_block_len < sym_op->cipher.data.length)
+ /* use previous block ciphertext as IV */
+ iv = dst - block_len;
+ else
+ /* runt block, i.e. less than one full block */
+ iv = sym_op->cipher.iv.data;
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
+ rte_hexdump(stdout, "BPI: src before post-process:", last_block,
+ last_block_len);
+ if (sym_op->m_dst != NULL)
+ rte_hexdump(stdout, "BPI: dst before post-process:",
+ dst, last_block_len);
+#endif
+ bpi_cipher_encrypt(last_block, dst, iv, block_len,
+ last_block_len, ctx->bpi_ctx);
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
+ rte_hexdump(stdout, "BPI: src after post-process:", last_block,
+ last_block_len);
+ if (sym_op->m_dst != NULL)
+ rte_hexdump(stdout, "BPI: dst after post-process:", dst,
+ last_block_len);
+#endif
+ }
+ return sym_op->cipher.data.length - last_block_len;
+}
+
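+/*
+ * Enqueue burst: reserve ring space by atomically bumping the in-flight
+ * counter, build one firmware descriptor per op at the shadow tail, then
+ * write the tail CSR once to hand the whole batch to the device.
+ */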
+uint16_t
+qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ register struct qat_queue *queue;
+ struct qat_qp *tmp_qp = (struct qat_qp *)qp;
+ register uint32_t nb_ops_sent = 0;
+ register struct rte_crypto_op **cur_op = ops;
+ register int ret;
+ uint16_t nb_ops_possible = nb_ops;
+ register uint8_t *base_addr;
+ register uint32_t tail;
+ int overflow;
+
+ if (unlikely(nb_ops == 0))
+ return 0;
+
+ /* read params used a lot in main loop into registers */
+ queue = &(tmp_qp->tx_q);
+ base_addr = (uint8_t *)queue->base_addr;
+ tail = queue->tail;
+
+ /* Find how many can actually fit on the ring */
+ overflow = rte_atomic16_add_return(&tmp_qp->inflights16, nb_ops)
+ - queue->max_inflights;
+ if (overflow > 0) {
+ rte_atomic16_sub(&tmp_qp->inflights16, overflow);
+ nb_ops_possible = nb_ops - overflow;
+ if (nb_ops_possible == 0)
+ return 0;
+ }
+
+ while (nb_ops_sent != nb_ops_possible) {
+ ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail,
+ tmp_qp->op_cookies[tail / queue->msg_size]);
+ if (ret != 0) {
+ tmp_qp->stats.enqueue_err_count++;
+ /*
+ * This message cannot be enqueued,
+ * so roll back the in-flight count by the
+ * number of ops that will not be sent
+ */
+ rte_atomic16_sub(&tmp_qp->inflights16,
+ nb_ops_possible - nb_ops_sent);
+ if (nb_ops_sent == 0)
+ return 0;
+ goto kick_tail;
+ }
+
+ tail = adf_modulo(tail + queue->msg_size, queue->modulo);
+ nb_ops_sent++;
+ cur_op++;
+ }
+kick_tail:
+ WRITE_CSR_RING_TAIL(tmp_qp->mmap_bar_addr, queue->hw_bundle_number,
+ queue->hw_queue_number, tail);
+ queue->tail = tail;
+ tmp_qp->stats.enqueued_count += nb_ops_sent;
+ return nb_ops_sent;
+}
+
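+/*
+ * Dequeue burst: walk the response ring until an empty-signature entry or
+ * nb_ops responses, set each op's status, post-process DOCSIS BPI sessions,
+ * then update the head CSR and in-flight counter once for the batch.
+ */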
+uint16_t
+qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct qat_queue *queue;
+ struct qat_qp *tmp_qp = (struct qat_qp *)qp;
+ uint32_t msg_counter = 0;
+ struct rte_crypto_op *rx_op;
+ struct icp_qat_fw_comn_resp *resp_msg;
+
+ queue = &(tmp_qp->rx_q);
+ resp_msg = (struct icp_qat_fw_comn_resp *)
+ ((uint8_t *)queue->base_addr + queue->head);
+
+ while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
+ msg_counter != nb_ops) {
+ rx_op = (struct rte_crypto_op *)(uintptr_t)
+ (resp_msg->opaque_data);
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
+ rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
+ sizeof(struct icp_qat_fw_comn_resp));
+
+#endif
+ if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
+ ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
+ resp_msg->comn_hdr.comn_status)) {
+ rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ } else {
+ struct qat_session *sess = (struct qat_session *)
+ (rx_op->sym->session->_private);
+ if (sess->bpi_ctx)
+ qat_bpicipher_postprocess(sess, rx_op);
+ rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ }
+
+ *(uint32_t *)resp_msg = ADF_RING_EMPTY_SIG;
+ queue->head = adf_modulo(queue->head +
+ queue->msg_size,
+ ADF_RING_SIZE_MODULO(queue->queue_size));
+ resp_msg = (struct icp_qat_fw_comn_resp *)
+ ((uint8_t *)queue->base_addr +
+ queue->head);
+ *ops = rx_op;
+ ops++;
+ msg_counter++;
+ }
+ if (msg_counter > 0) {
+ WRITE_CSR_RING_HEAD(tmp_qp->mmap_bar_addr,
+ queue->hw_bundle_number,
+ queue->hw_queue_number, queue->head);
+ rte_atomic16_sub(&tmp_qp->inflights16, msg_counter);
+ tmp_qp->stats.dequeued_count += msg_counter;
+ }
+ return msg_counter;
+}
+
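+/*
+ * Build a QAT scatter-gather list from an mbuf chain, starting at
+ * buff_start and covering data_len bytes; fails if more than
+ * QAT_SGL_MAX_NUMBER entries would be needed.
+ */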
+static inline int
+qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buff_start,
+ struct qat_alg_buf_list *list, uint32_t data_len)
+{
+ int nr = 1;
+
+ uint32_t buf_len = rte_pktmbuf_mtophys(buf) -
+ buff_start + rte_pktmbuf_data_len(buf);
+
+ list->bufers[0].addr = buff_start;
+ list->bufers[0].resrvd = 0;
+ list->bufers[0].len = buf_len;
+
+ if (data_len <= buf_len) {
+ list->num_bufs = nr;
+ list->bufers[0].len = data_len;
+ return 0;
+ }
+
+ buf = buf->next;
+ while (buf) {
+ if (unlikely(nr == QAT_SGL_MAX_NUMBER)) {
+ PMD_DRV_LOG(ERR, "QAT PMD exceeded size of QAT SGL"
+ " entry(%u)",
+ QAT_SGL_MAX_NUMBER);
+ return -EINVAL;
+ }
+
+ list->bufers[nr].len = rte_pktmbuf_data_len(buf);
+ list->bufers[nr].resrvd = 0;
+ list->bufers[nr].addr = rte_pktmbuf_mtophys(buf);
+
+ buf_len += list->bufers[nr].len;
+ buf = buf->next;
+
+ if (buf_len > data_len) {
+ list->bufers[nr].len -=
+ buf_len - data_len;
+ buf = NULL;
+ }
+ ++nr;
+ }
+ list->num_bufs = nr;
+
+ return 0;
+}
+
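+/*
+ * Translate one crypto op into a firmware bulk request: copy the session's
+ * request template, fill in cipher/auth offsets and lengths, set up flat or
+ * SGL buffer addresses, and apply the GCM/GMAC special cases.
+ */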
+static inline int
+qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
+ struct qat_crypto_op_cookie *qat_op_cookie)
+{
+ int ret = 0;
+ struct qat_session *ctx;
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ struct icp_qat_fw_la_auth_req_params *auth_param;
+ register struct icp_qat_fw_la_bulk_req *qat_req;
+ uint8_t do_auth = 0, do_cipher = 0;
+ uint32_t cipher_len = 0, cipher_ofs = 0;
+ uint32_t auth_len = 0, auth_ofs = 0;
+ uint32_t min_ofs = 0;
+ uint64_t src_buf_start = 0, dst_buf_start = 0;
+ uint8_t do_sgl = 0;
+
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
+ if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
+ PMD_DRV_LOG(ERR, "QAT PMD only supports symmetric crypto "
+ "operation requests, op (%p) is not a "
+ "symmetric operation.", op);
+ return -EINVAL;
+ }
+#endif
+ if (unlikely(op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) {
+ PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
+ " requests, op (%p) is sessionless.", op);
+ return -EINVAL;
+ }
+
+ if (unlikely(op->sym->session->dev_type != RTE_CRYPTODEV_QAT_SYM_PMD)) {
+ PMD_DRV_LOG(ERR, "Session was not created for this device");
+ return -EINVAL;
+ }
+
+ ctx = (struct qat_session *)op->sym->session->_private;
+ qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+ rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
+ qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
+ cipher_param = (void *)&qat_req->serv_specif_rqpars;
+ auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
+
+ if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
+ ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
+ do_auth = 1;
+ do_cipher = 1;
+ } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
+ do_auth = 1;
+ do_cipher = 0;
+ } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+ do_auth = 0;
+ do_cipher = 1;
+ }
+
+ if (do_cipher) {
+
+ if (ctx->qat_cipher_alg ==
+ ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+ ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
+ ctx->qat_cipher_alg ==
+ ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+
+ if (unlikely(
+ (cipher_param->cipher_length % BYTE_LENGTH != 0)
+ || (cipher_param->cipher_offset
+ % BYTE_LENGTH != 0))) {
+ PMD_DRV_LOG(ERR,
+ "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return -EINVAL;
+ }
+ cipher_len = op->sym->cipher.data.length >> 3;
+ cipher_ofs = op->sym->cipher.data.offset >> 3;
+
+ } else if (ctx->bpi_ctx) {
+ /* DOCSIS - only send complete blocks to device
+ * Process any partial block using CFB mode.
+ * Even if 0 complete blocks, still send this to device
+ * to get into rx queue for post-process and dequeuing
+ */
+ cipher_len = qat_bpicipher_preprocess(ctx, op);
+ cipher_ofs = op->sym->cipher.data.offset;
+ } else {
+ cipher_len = op->sym->cipher.data.length;
+ cipher_ofs = op->sym->cipher.data.offset;
+ }
+
+ /*
+ * Copy the IV into the request if it fits.
+ * If the IV length is zero do not copy anything but still
+ * use the IV embedded in the request descriptor.
+ */
+ if (op->sym->cipher.iv.length) {
+ if (op->sym->cipher.iv.length <=
+ sizeof(cipher_param->u.cipher_IV_array)) {
+ rte_memcpy(cipher_param->u.cipher_IV_array,
+ op->sym->cipher.iv.data,
+ op->sym->cipher.iv.length);
+ } else {
+ ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
+ qat_req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_CIPH_IV_64BIT_PTR);
+ cipher_param->u.s.cipher_IV_ptr =
+ op->sym->cipher.iv.phys_addr;
+ }
+ }
+ min_ofs = cipher_ofs;
+ }
+
+ if (do_auth) {
+
+ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
+ ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
+ if (unlikely((auth_param->auth_off % BYTE_LENGTH != 0)
+ || (auth_param->auth_len % BYTE_LENGTH != 0))) {
+ PMD_DRV_LOG(ERR,
+ "For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return -EINVAL;
+ }
+ auth_ofs = op->sym->auth.data.offset >> 3;
+ auth_len = op->sym->auth.data.length >> 3;
+
+ if (ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_KASUMI_F9) {
+ if (do_cipher) {
+ auth_len = auth_len + auth_ofs + 1 -
+ ICP_QAT_HW_KASUMI_BLK_SZ;
+ auth_ofs = ICP_QAT_HW_KASUMI_BLK_SZ;
+ } else {
+ auth_len = auth_len + auth_ofs + 1;
+ auth_ofs = 0;
+ }
+ }
+
+ } else if (ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+ ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+ auth_ofs = op->sym->cipher.data.offset;
+ auth_len = op->sym->cipher.data.length;
+ } else {
+ auth_ofs = op->sym->auth.data.offset;
+ auth_len = op->sym->auth.data.length;
+ }
+ min_ofs = auth_ofs;
+
+ auth_param->auth_res_addr = op->sym->auth.digest.phys_addr;
+
+ auth_param->u1.aad_adr = op->sym->auth.aad.phys_addr;
+
+ }
+
+ if (op->sym->m_src->next || (op->sym->m_dst && op->sym->m_dst->next))
+ do_sgl = 1;
+
+ /* adjust for chain case */
+ if (do_cipher && do_auth)
+ min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
+
+ if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl))
+ min_ofs = 0;
+
+ if (unlikely(op->sym->m_dst != NULL)) {
+ /* Out-of-place operation (OOP)
+ * Don't align DMA start. DMA the minimum data-set
+ * so as not to overwrite data in dest buffer
+ */
+ src_buf_start =
+ rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs);
+ dst_buf_start =
+ rte_pktmbuf_mtophys_offset(op->sym->m_dst, min_ofs);
+
+ } else {
+ /* In-place operation
+ * Start DMA at nearest aligned address below min_ofs
+ */
+ src_buf_start =
+ rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs)
+ & QAT_64_BTYE_ALIGN_MASK;
+
+ if (unlikely((rte_pktmbuf_mtophys(op->sym->m_src) -
+ rte_pktmbuf_headroom(op->sym->m_src))
+ > src_buf_start)) {
+ /* alignment has pushed addr ahead of start of mbuf
+ * so revert and take the performance hit
+ */
+ src_buf_start =
+ rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ min_ofs);
+ }
+ dst_buf_start = src_buf_start;
+ }
+
+ if (do_cipher) {
+ cipher_param->cipher_offset =
+ (uint32_t)rte_pktmbuf_mtophys_offset(
+ op->sym->m_src, cipher_ofs) - src_buf_start;
+ cipher_param->cipher_length = cipher_len;
+ } else {
+ cipher_param->cipher_offset = 0;
+ cipher_param->cipher_length = 0;
+ }
+ if (do_auth) {
+ auth_param->auth_off = (uint32_t)rte_pktmbuf_mtophys_offset(
+ op->sym->m_src, auth_ofs) - src_buf_start;
+ auth_param->auth_len = auth_len;
+ } else {
+ auth_param->auth_off = 0;
+ auth_param->auth_len = 0;
+ }
+ qat_req->comn_mid.dst_length =
+ qat_req->comn_mid.src_length =
+ (cipher_param->cipher_offset + cipher_param->cipher_length)
+ > (auth_param->auth_off + auth_param->auth_len) ?
+ (cipher_param->cipher_offset + cipher_param->cipher_length)
+ : (auth_param->auth_off + auth_param->auth_len);
+
+ if (do_sgl) {
+
+ ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
+ QAT_COMN_PTR_TYPE_SGL);
+ ret = qat_sgl_fill_array(op->sym->m_src, src_buf_start,
+ &qat_op_cookie->qat_sgl_list_src,
+ qat_req->comn_mid.src_length);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "QAT PMD Cannot fill sgl array");
+ return ret;
+ }
+
+ if (likely(op->sym->m_dst == NULL))
+ qat_req->comn_mid.dest_data_addr =
+ qat_req->comn_mid.src_data_addr =
+ qat_op_cookie->qat_sgl_src_phys_addr;
+ else {
+ ret = qat_sgl_fill_array(op->sym->m_dst,
+ dst_buf_start,
+ &qat_op_cookie->qat_sgl_list_dst,
+ qat_req->comn_mid.dst_length);
+
+ if (ret) {
+ PMD_DRV_LOG(ERR, "QAT PMD Cannot "
+ "fill sgl array");
+ return ret;
+ }
+
+ qat_req->comn_mid.src_data_addr =
+ qat_op_cookie->qat_sgl_src_phys_addr;
+ qat_req->comn_mid.dest_data_addr =
+ qat_op_cookie->qat_sgl_dst_phys_addr;
+ }
+ } else {
+ qat_req->comn_mid.src_data_addr = src_buf_start;
+ qat_req->comn_mid.dest_data_addr = dst_buf_start;
+ }
+
+ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+ if (op->sym->cipher.iv.length == 12) {
+ /*
+ * For GCM a 12 byte IV is allowed,
+ * but we need to inform the f/w
+ */
+ ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+ qat_req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+ }
+ if (op->sym->cipher.data.length == 0) {
+ /*
+ * GMAC: zero cipher length means the operation
+ * authenticates the AAD buffer only
+ */
+ qat_req->comn_mid.dest_data_addr =
+ qat_req->comn_mid.src_data_addr =
+ op->sym->auth.aad.phys_addr;
+ qat_req->comn_mid.dst_length =
+ qat_req->comn_mid.src_length =
+ rte_pktmbuf_data_len(op->sym->m_src);
+ cipher_param->cipher_length = 0;
+ cipher_param->cipher_offset = 0;
+ auth_param->u1.aad_adr = 0;
+ auth_param->auth_len = op->sym->auth.aad.length;
+ auth_param->auth_off = op->sym->auth.data.offset;
+ auth_param->u2.aad_sz = 0;
+ }
+ }
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
+ rte_hexdump(stdout, "qat_req:", qat_req,
+ sizeof(struct icp_qat_fw_la_bulk_req));
+ rte_hexdump(stdout, "src_data:",
+ rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
+ rte_pktmbuf_data_len(op->sym->m_src));
+ rte_hexdump(stdout, "iv:", op->sym->cipher.iv.data,
+ op->sym->cipher.iv.length);
+ rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
+ op->sym->auth.digest.length);
+ rte_hexdump(stdout, "aad:", op->sym->auth.aad.data,
+ op->sym->auth.aad.length);
+#endif
+ return 0;
+}
+
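+/* Fast modulo for power-of-two ring sizes: returns data % (1 << shift). */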
+static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
+{
+ uint32_t div = data >> shift;
+ uint32_t mult = div << shift;
+
+ return data - mult;
+}
+
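+/*
+ * Pre-compute the physical address of the content descriptor: mempool
+ * object physical base plus the offsets of the private session area and
+ * of the cd field within struct qat_session.
+ */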
+void qat_crypto_sym_session_init(struct rte_mempool *mp, void *sym_sess)
+{
+ struct rte_cryptodev_sym_session *sess = sym_sess;
+ struct qat_session *s = (void *)sess->_private;
+
+ PMD_INIT_FUNC_TRACE();
+ s->cd_paddr = rte_mempool_virt2phy(mp, sess) +
+ offsetof(struct qat_session, cd) +
+ offsetof(struct rte_cryptodev_sym_session, _private);
+}
+
+int qat_dev_config(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused struct rte_cryptodev_config *config)
+{
+ PMD_INIT_FUNC_TRACE();
+ return 0;
+}
+
+int qat_dev_start(__rte_unused struct rte_cryptodev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+ return 0;
+}
+
+void qat_dev_stop(__rte_unused struct rte_cryptodev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+}
+
+int qat_dev_close(struct rte_cryptodev *dev)
+{
+ int i, ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ ret = qat_crypto_sym_qp_release(dev, i);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+void qat_dev_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *info)
+{
+ struct qat_pmd_private *internals = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+ if (info != NULL) {
+ info->max_nb_queue_pairs =
+ ADF_NUM_SYM_QPS_PER_BUNDLE *
+ ADF_NUM_BUNDLES_PER_DEV;
+ info->feature_flags = dev->feature_flags;
+ info->capabilities = internals->qat_dev_capabilities;
+ info->sym.max_nb_sessions = internals->max_nb_sessions;
+ info->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
+ }
+}
+
+void qat_crypto_sym_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int i;
+ struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);
+
+ PMD_INIT_FUNC_TRACE();
+ if (stats == NULL) {
+ PMD_DRV_LOG(ERR, "invalid stats ptr NULL");
+ return;
+ }
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ if (qp[i] == NULL) {
+ PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
+ continue;
+ }
+
+ stats->enqueued_count += qp[i]->stats.enqueued_count;
+ stats->dequeued_count += qp[i]->stats.dequeued_count;
+ stats->enqueue_err_count += qp[i]->stats.enqueue_err_count;
+ stats->dequeue_err_count += qp[i]->stats.dequeue_err_count;
+ }
+}
+
+void qat_crypto_sym_stats_reset(struct rte_cryptodev *dev)
+{
+ int i;
+ struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);
+
+ PMD_INIT_FUNC_TRACE();
+ for (i = 0; i < dev->data->nb_queue_pairs; i++)
+ memset(&(qp[i]->stats), 0, sizeof(qp[i]->stats));
+ PMD_DRV_LOG(DEBUG, "QAT crypto: stats cleared");
+}
diff --git a/src/seastar/dpdk/drivers/crypto/qat/qat_crypto.h b/src/seastar/dpdk/drivers/crypto/qat/qat_crypto.h
new file mode 100644
index 00000000..b740d6b0
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/qat/qat_crypto.h
@@ -0,0 +1,141 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _QAT_CRYPTO_H_
+#define _QAT_CRYPTO_H_
+
+#include <rte_cryptodev_pmd.h>
+#include <rte_memzone.h>
+
+#include "qat_crypto_capabilities.h"
+
+/*
+ * This macro rounds up a number to be a multiple of
+ * the alignment when the alignment is a power of 2
+ */
+#define ALIGN_POW2_ROUNDUP(num, align) \
+ (((num) + (align) - 1) & ~((align) - 1))
+#define QAT_64_BTYE_ALIGN_MASK (~0x3f)
+
+/**
+ * Structure associated with each queue.
+ */
+struct qat_queue {
+ char memz_name[RTE_MEMZONE_NAMESIZE];
+ void *base_addr; /* Base address */
+ phys_addr_t base_phys_addr; /* Queue physical address */
+ uint32_t head; /* Shadow copy of the head */
+ uint32_t tail; /* Shadow copy of the tail */
+ uint32_t modulo;
+ uint32_t msg_size;
+ uint16_t max_inflights;
+ uint32_t queue_size;
+ uint8_t hw_bundle_number;
+ uint8_t hw_queue_number;
+ /* HW queue aka ring offset on bundle */
+};
+
+struct qat_qp {
+ void *mmap_bar_addr;
+ rte_atomic16_t inflights16;
+ struct qat_queue tx_q;
+ struct qat_queue rx_q;
+ struct rte_cryptodev_stats stats;
+ struct rte_mempool *op_cookie_pool;
+ void **op_cookies;
+ uint32_t nb_descriptors;
+} __rte_cache_aligned;
+
+/** private data structure for each QAT device */
+struct qat_pmd_private {
+ unsigned max_nb_queue_pairs;
+ /**< Max number of queue pairs supported by device */
+ unsigned max_nb_sessions;
+ /**< Max number of sessions supported by device */
+ const struct rte_cryptodev_capabilities *qat_dev_capabilities;
+};
+
+int qat_dev_config(struct rte_cryptodev *dev,
+ struct rte_cryptodev_config *config);
+int qat_dev_start(struct rte_cryptodev *dev);
+void qat_dev_stop(struct rte_cryptodev *dev);
+int qat_dev_close(struct rte_cryptodev *dev);
+void qat_dev_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *info);
+
+void qat_crypto_sym_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats);
+void qat_crypto_sym_stats_reset(struct rte_cryptodev *dev);
+
+int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
+ const struct rte_cryptodev_qp_conf *rx_conf, int socket_id);
+int qat_crypto_sym_qp_release(struct rte_cryptodev *dev,
+ uint16_t queue_pair_id);
+
+int
+qat_pmd_session_mempool_create(struct rte_cryptodev *dev,
+ unsigned nb_objs, unsigned obj_cache_size, int socket_id);
+
+extern unsigned
+qat_crypto_sym_get_session_private_size(struct rte_cryptodev *dev);
+
+extern void
+qat_crypto_sym_session_init(struct rte_mempool *mempool, void *priv_sess);
+
+extern void *
+qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform, void *session_private);
+
+struct qat_session *
+qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct qat_session *session_private);
+
+void *
+qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform, void *session_private);
+
+
+extern void
+qat_crypto_sym_clear_session(struct rte_cryptodev *dev, void *session);
+
+
+extern uint16_t
+qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops);
+
+extern uint16_t
+qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops);
+
+#endif /* _QAT_CRYPTO_H_ */
diff --git a/src/seastar/dpdk/drivers/crypto/qat/qat_crypto_capabilities.h b/src/seastar/dpdk/drivers/crypto/qat/qat_crypto_capabilities.h
new file mode 100644
index 00000000..1294f247
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/qat/qat_crypto_capabilities.h
@@ -0,0 +1,574 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _QAT_CRYPTO_CAPABILITIES_H_
+#define _QAT_CRYPTO_CAPABILITIES_H_
+
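+/*
+ * Capability table entries, expanded into arrays of
+ * struct rte_cryptodev_capabilities by the PMD: the CPM16 set is common
+ * to all supported devices, the CPM17 set adds the ZUC EEA3/EIA3 entries.
+ */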
+#define QAT_BASE_CPM16_SYM_CAPABILITIES \
+ { /* SHA1 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 64, \
+ .max = 64, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 20, \
+ .max = 20, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA224 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA224_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 64, \
+ .max = 64, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 28, \
+ .max = 28, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA256 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA256_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 64, \
+ .max = 64, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 32, \
+ .max = 32, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA384 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA384_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 128, \
+ .max = 128, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 48, \
+ .max = 48, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA512 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA512_HMAC, \
+ .block_size = 128, \
+ .key_size = { \
+ .min = 128, \
+ .max = 128, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 64, \
+ .max = 64, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* MD5 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_MD5_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 8, \
+ .max = 64, \
+ .increment = 8 \
+ }, \
+ .digest_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES XCBC MAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES GCM (AUTH) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_AES_GCM, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .digest_size = { \
+ .min = 8, \
+ .max = 16, \
+ .increment = 4 \
+ }, \
+ .aad_size = { \
+ .min = 0, \
+ .max = 240, \
+ .increment = 1 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES GMAC (AUTH) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_AES_GMAC, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .digest_size = { \
+ .min = 8, \
+ .max = 16, \
+ .increment = 4 \
+ }, \
+ .aad_size = { \
+ .min = 1, \
+ .max = 65535, \
+ .increment = 1 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* SNOW 3G (UIA2) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 4, \
+ .max = 4, \
+ .increment = 0 \
+ }, \
+ .aad_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES GCM (CIPHER) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_AES_GCM, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 12, \
+ .max = 12, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES CBC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES DOCSIS BPI */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI,\
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* SNOW 3G (UEA2) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES CTR */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* NULL (AUTH) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_NULL, \
+ .block_size = 1, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, }, \
+ }, }, \
+ }, \
+ { /* NULL (CIPHER) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_NULL, \
+ .block_size = 1, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .iv_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ } \
+ }, }, \
+ }, } \
+ }, \
+ { /* KASUMI (F8) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_KASUMI_F8, \
+ .block_size = 8, \
+ .key_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .iv_size = { \
+ .min = 8, \
+ .max = 8, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* KASUMI (F9) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_KASUMI_F9, \
+ .block_size = 8, \
+ .key_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 4, \
+ .max = 4, \
+ .increment = 0 \
+ }, \
+ .aad_size = { \
+ .min = 8, \
+ .max = 8, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* 3DES CBC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_3DES_CBC, \
+ .block_size = 8, \
+ .key_size = { \
+ .min = 16, \
+ .max = 24, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 8, \
+ .max = 8, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* 3DES CTR */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_3DES_CTR, \
+ .block_size = 8, \
+ .key_size = { \
+ .min = 16, \
+ .max = 24, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 8, \
+ .max = 8, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* DES CBC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_DES_CBC, \
+ .block_size = 8, \
+ .key_size = { \
+ .min = 8, \
+ .max = 8, \
+ .increment = 0 \
+ }, \
+ .iv_size = { \
+ .min = 8, \
+ .max = 8, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* DES DOCSISBPI */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_DES_DOCSISBPI,\
+ .block_size = 8, \
+ .key_size = { \
+ .min = 8, \
+ .max = 8, \
+ .increment = 0 \
+ }, \
+ .iv_size = { \
+ .min = 8, \
+ .max = 8, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }
+
+#define QAT_EXTRA_CPM17_SYM_CAPABILITIES \
+ { /* ZUC (EEA3) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_ZUC_EEA3, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* ZUC (EIA3) */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_ZUC_EIA3, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 4, \
+ .max = 4, \
+ .increment = 0 \
+ }, \
+ .aad_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }
+
+#endif /* _QAT_CRYPTO_CAPABILITIES_H_ */
diff --git a/src/seastar/dpdk/drivers/crypto/qat/qat_logs.h b/src/seastar/dpdk/drivers/crypto/qat/qat_logs.h
new file mode 100644
index 00000000..a909f630
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/qat/qat_logs.h
@@ -0,0 +1,78 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _QAT_LOGS_H_
+#define _QAT_LOGS_H_
+
+#define PMD_INIT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, RTE_LOGTYPE_PMD, \
+ "PMD: %s(): " fmt "\n", __func__, ##args)
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_INIT
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+#else
+#define PMD_INIT_FUNC_TRACE() do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
+#define PMD_RX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_RX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
+#define PMD_TX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX_FREE
+#define PMD_TX_FREE_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_DRIVER
+#define PMD_DRV_LOG_RAW(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
+#else
+#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0)
+#endif
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+ PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
+
+#endif /* _QAT_LOGS_H_ */
diff --git a/src/seastar/dpdk/drivers/crypto/qat/qat_qp.c b/src/seastar/dpdk/drivers/crypto/qat/qat_qp.c
new file mode 100644
index 00000000..a358ccd7
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/qat/qat_qp.c
@@ -0,0 +1,490 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_common.h>
+#include <rte_dev.h>
+#include <rte_malloc.h>
+#include <rte_memzone.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_atomic.h>
+#include <rte_prefetch.h>
+
+#include "qat_logs.h"
+#include "qat_crypto.h"
+#include "qat_algs.h"
+#include "adf_transport_access_macros.h"
+
+#define ADF_MAX_SYM_DESC 4096
+#define ADF_MIN_SYM_DESC 128
+#define ADF_SYM_TX_RING_DESC_SIZE 128
+#define ADF_SYM_RX_RING_DESC_SIZE 32
+#define ADF_SYM_TX_QUEUE_STARTOFF 2
+/* Offset from bundle start to 1st Sym Tx queue */
+#define ADF_SYM_RX_QUEUE_STARTOFF 10
+#define ADF_ARB_REG_SLOT 0x1000
+#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C
+
+#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
+ ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
+ (ADF_ARB_REG_SLOT * index), value)
+
+static int qat_qp_check_queue_alignment(uint64_t phys_addr,
+ uint32_t queue_size_bytes);
+static int qat_tx_queue_create(struct rte_cryptodev *dev,
+ struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
+ int socket_id);
+static int qat_rx_queue_create(struct rte_cryptodev *dev,
+ struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
+ int socket_id);
+static void qat_queue_delete(struct qat_queue *queue);
+static int qat_queue_create(struct rte_cryptodev *dev,
+ struct qat_queue *queue, uint32_t nb_desc, uint8_t desc_size,
+ int socket_id);
+static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
+ uint32_t *queue_size_for_csr);
+static void adf_configure_queues(struct qat_qp *queue);
+static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr);
+static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr);
+
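+/*
+ * Reserve (or re-use) a DMA-able memzone for a ring: an existing memzone of
+ * sufficient size on the right socket is re-used, otherwise a new zone is
+ * reserved with flags matching the hugepage size of the system.
+ */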
+static const struct rte_memzone *
+queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
+ int socket_id)
+{
+ const struct rte_memzone *mz;
+ unsigned memzone_flags = 0;
+ const struct rte_memseg *ms;
+
+ PMD_INIT_FUNC_TRACE();
+ mz = rte_memzone_lookup(queue_name);
+ if (mz != 0) {
+ if (((size_t)queue_size <= mz->len) &&
+ ((socket_id == SOCKET_ID_ANY) ||
+ (socket_id == mz->socket_id))) {
+ PMD_DRV_LOG(DEBUG, "re-use memzone already "
+ "allocated for %s", queue_name);
+ return mz;
+ }
+
+ PMD_DRV_LOG(ERR, "Incompatible memzone already "
+ "allocated %s, size %u, socket %d. "
+ "Requested size %u, socket %u",
+ queue_name, (uint32_t)mz->len,
+ mz->socket_id, queue_size, socket_id);
+ return NULL;
+ }
+
+ PMD_DRV_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %u",
+ queue_name, queue_size, socket_id);
+ ms = rte_eal_get_physmem_layout();
+ switch (ms[0].hugepage_sz) {
+ case(RTE_PGSIZE_2M):
+ memzone_flags = RTE_MEMZONE_2MB;
+ break;
+ case(RTE_PGSIZE_1G):
+ memzone_flags = RTE_MEMZONE_1GB;
+ break;
+ case(RTE_PGSIZE_16M):
+ memzone_flags = RTE_MEMZONE_16MB;
+ break;
+ case(RTE_PGSIZE_16G):
+ memzone_flags = RTE_MEMZONE_16GB;
+ break;
+ default:
+ memzone_flags = RTE_MEMZONE_SIZE_HINT_ONLY;
+ }
+#ifdef RTE_LIBRTE_XEN_DOM0
+ return rte_memzone_reserve_bounded(queue_name, queue_size,
+ socket_id, 0, RTE_CACHE_LINE_SIZE, RTE_PGSIZE_2M);
+#else
+ return rte_memzone_reserve_aligned(queue_name, queue_size, socket_id,
+ memzone_flags, queue_size);
+#endif
+}
+
+int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id)
+{
+ struct qat_qp *qp;
+ struct rte_pci_device *pci_dev;
+ int ret;
+ char op_cookie_pool_name[RTE_RING_NAMESIZE];
+ uint32_t i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* If qp is already in use free ring memory and qp metadata. */
+ if (dev->data->queue_pairs[queue_pair_id] != NULL) {
+ ret = qat_crypto_sym_qp_release(dev, queue_pair_id);
+ if (ret < 0)
+ return ret;
+ }
+
+ if ((qp_conf->nb_descriptors > ADF_MAX_SYM_DESC) ||
+ (qp_conf->nb_descriptors < ADF_MIN_SYM_DESC)) {
+ PMD_DRV_LOG(ERR, "Can't create qp for %u descriptors",
+ qp_conf->nb_descriptors);
+ return -EINVAL;
+ }
+
+ pci_dev = RTE_DEV_TO_PCI(dev->device);
+
+ if (pci_dev->mem_resource[0].addr == NULL) {
+ PMD_DRV_LOG(ERR, "Could not find VF config space "
+ "(UIO driver attached?).");
+ return -EINVAL;
+ }
+
+ if (queue_pair_id >=
+ (ADF_NUM_SYM_QPS_PER_BUNDLE *
+ ADF_NUM_BUNDLES_PER_DEV)) {
+ PMD_DRV_LOG(ERR, "qp_id %u invalid for this device",
+ queue_pair_id);
+ return -EINVAL;
+ }
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc("qat PMD qp metadata",
+ sizeof(*qp), RTE_CACHE_LINE_SIZE);
+ if (qp == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc mem for qp struct");
+ return -ENOMEM;
+ }
+ qp->nb_descriptors = qp_conf->nb_descriptors;
+ qp->op_cookies = rte_zmalloc("qat PMD op cookie pointer",
+ qp_conf->nb_descriptors * sizeof(*qp->op_cookies),
+ RTE_CACHE_LINE_SIZE);
+
+ qp->mmap_bar_addr = pci_dev->mem_resource[0].addr;
+ rte_atomic16_init(&qp->inflights16);
+
+ if (qat_tx_queue_create(dev, &(qp->tx_q),
+ queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
+ PMD_INIT_LOG(ERR, "Tx queue create failed "
+ "queue_pair_id=%u", queue_pair_id);
+ goto create_err;
+ }
+
+ if (qat_rx_queue_create(dev, &(qp->rx_q),
+ queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
+ PMD_DRV_LOG(ERR, "Rx queue create failed "
+ "queue_pair_id=%hu", queue_pair_id);
+ qat_queue_delete(&(qp->tx_q));
+ goto create_err;
+ }
+
+ adf_configure_queues(qp);
+ adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr);
+ snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE, "%s_qp_op_%d_%hu",
+ dev->driver->pci_drv.driver.name, dev->data->dev_id,
+ queue_pair_id);
+
+ qp->op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name);
+ if (qp->op_cookie_pool == NULL)
+ qp->op_cookie_pool = rte_mempool_create(op_cookie_pool_name,
+ qp->nb_descriptors,
+ sizeof(struct qat_crypto_op_cookie), 64, 0,
+ NULL, NULL, NULL, NULL, socket_id,
+ 0);
+ if (!qp->op_cookie_pool) {
+ PMD_DRV_LOG(ERR, "QAT PMD Cannot create"
+ " op mempool");
+ goto create_err;
+ }
+
+ for (i = 0; i < qp->nb_descriptors; i++) {
+ if (rte_mempool_get(qp->op_cookie_pool, &qp->op_cookies[i])) {
+ PMD_DRV_LOG(ERR, "QAT PMD Cannot get op_cookie");
+ return -EFAULT;
+ }
+
+ struct qat_crypto_op_cookie *sql_cookie =
+ qp->op_cookies[i];
+
+ sql_cookie->qat_sgl_src_phys_addr =
+ rte_mempool_virt2phy(qp->op_cookie_pool,
+ sql_cookie) +
+ offsetof(struct qat_crypto_op_cookie,
+ qat_sgl_list_src);
+
+ sql_cookie->qat_sgl_dst_phys_addr =
+ rte_mempool_virt2phy(qp->op_cookie_pool,
+ sql_cookie) +
+ offsetof(struct qat_crypto_op_cookie,
+ qat_sgl_list_dst);
+ }
+ dev->data->queue_pairs[queue_pair_id] = qp;
+ return 0;
+
+create_err:
+ rte_free(qp);
+ return -EFAULT;
+}
+
+int qat_crypto_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
+{
+ struct qat_qp *qp =
+ (struct qat_qp *)dev->data->queue_pairs[queue_pair_id];
+ uint32_t i;
+
+ PMD_INIT_FUNC_TRACE();
+ if (qp == NULL) {
+ PMD_DRV_LOG(DEBUG, "qp already freed");
+ return 0;
+ }
+
+ /* Don't free memory if there are still responses to be processed */
+ if (rte_atomic16_read(&(qp->inflights16)) == 0) {
+ qat_queue_delete(&(qp->tx_q));
+ qat_queue_delete(&(qp->rx_q));
+ } else {
+ return -EAGAIN;
+ }
+
+ adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr);
+
+ for (i = 0; i < qp->nb_descriptors; i++)
+ rte_mempool_put(qp->op_cookie_pool, qp->op_cookies[i]);
+
+ if (qp->op_cookie_pool)
+ rte_mempool_free(qp->op_cookie_pool);
+
+ rte_free(qp->op_cookies);
+ rte_free(qp);
+ dev->data->queue_pairs[queue_pair_id] = NULL;
+ return 0;
+}
+
+static int qat_tx_queue_create(struct rte_cryptodev *dev,
+ struct qat_queue *queue, uint8_t qp_id,
+ uint32_t nb_desc, int socket_id)
+{
+ PMD_INIT_FUNC_TRACE();
+ queue->hw_bundle_number = qp_id/ADF_NUM_SYM_QPS_PER_BUNDLE;
+ queue->hw_queue_number = (qp_id%ADF_NUM_SYM_QPS_PER_BUNDLE) +
+ ADF_SYM_TX_QUEUE_STARTOFF;
+ PMD_DRV_LOG(DEBUG, "TX ring for %u msgs: qp_id %d, bundle %u, ring %u",
+ nb_desc, qp_id, queue->hw_bundle_number,
+ queue->hw_queue_number);
+
+ return qat_queue_create(dev, queue, nb_desc,
+ ADF_SYM_TX_RING_DESC_SIZE, socket_id);
+}
+
+static int qat_rx_queue_create(struct rte_cryptodev *dev,
+ struct qat_queue *queue, uint8_t qp_id, uint32_t nb_desc,
+ int socket_id)
+{
+ PMD_INIT_FUNC_TRACE();
+ queue->hw_bundle_number = qp_id/ADF_NUM_SYM_QPS_PER_BUNDLE;
+ queue->hw_queue_number = (qp_id%ADF_NUM_SYM_QPS_PER_BUNDLE) +
+ ADF_SYM_RX_QUEUE_STARTOFF;
+
+ PMD_DRV_LOG(DEBUG, "RX ring for %u msgs: qp id %d, bundle %u, ring %u",
+ nb_desc, qp_id, queue->hw_bundle_number,
+ queue->hw_queue_number);
+ return qat_queue_create(dev, queue, nb_desc,
+ ADF_SYM_RX_RING_DESC_SIZE, socket_id);
+}
+
+static void qat_queue_delete(struct qat_queue *queue)
+{
+ const struct rte_memzone *mz;
+ int status = 0;
+
+ if (queue == NULL) {
+ PMD_DRV_LOG(DEBUG, "Invalid queue");
+ return;
+ }
+ mz = rte_memzone_lookup(queue->memz_name);
+ if (mz != NULL) {
+ /* Write an unused pattern to the queue memory. */
+ memset(queue->base_addr, 0x7F, queue->queue_size);
+ status = rte_memzone_free(mz);
+ if (status != 0)
+ PMD_DRV_LOG(ERR, "Error %d on freeing queue %s",
+ status, queue->memz_name);
+ } else {
+ PMD_DRV_LOG(DEBUG, "queue %s doesn't exist",
+ queue->memz_name);
+ }
+}
+
+static int
+qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue,
+ uint32_t nb_desc, uint8_t desc_size, int socket_id)
+{
+ uint64_t queue_base;
+ void *io_addr;
+ const struct rte_memzone *qp_mz;
+ uint32_t queue_size_bytes = nb_desc*desc_size;
+ struct rte_pci_device *pci_dev;
+
+ PMD_INIT_FUNC_TRACE();
+ if (desc_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
+ PMD_DRV_LOG(ERR, "Invalid descriptor size %d", desc_size);
+ return -EINVAL;
+ }
+
+ /*
+ * Allocate a memzone for the queue - create a unique name.
+ */
+ snprintf(queue->memz_name, sizeof(queue->memz_name), "%s_%s_%d_%d_%d",
+ dev->driver->pci_drv.driver.name, "qp_mem", dev->data->dev_id,
+ queue->hw_bundle_number, queue->hw_queue_number);
+ qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
+ socket_id);
+ if (qp_mz == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to allocate ring memzone");
+ return -ENOMEM;
+ }
+
+ queue->base_addr = (char *)qp_mz->addr;
+ queue->base_phys_addr = qp_mz->phys_addr;
+ if (qat_qp_check_queue_alignment(queue->base_phys_addr,
+ queue_size_bytes)) {
+ PMD_DRV_LOG(ERR, "Invalid alignment on queue create "
+ " 0x%"PRIx64"\n",
+ queue->base_phys_addr);
+ return -EFAULT;
+ }
+
+ if (adf_verify_queue_size(desc_size, nb_desc, &(queue->queue_size))
+ != 0) {
+ PMD_DRV_LOG(ERR, "Invalid num inflights");
+ return -EINVAL;
+ }
+
+ queue->max_inflights = ADF_MAX_INFLIGHTS(queue->queue_size,
+ ADF_BYTES_TO_MSG_SIZE(desc_size));
+ queue->modulo = ADF_RING_SIZE_MODULO(queue->queue_size);
+ PMD_DRV_LOG(DEBUG, "RING size in CSR: %u, in bytes %u, nb msgs %u,"
+ " msg_size %u, max_inflights %u modulo %u",
+ queue->queue_size, queue_size_bytes,
+ nb_desc, desc_size, queue->max_inflights,
+ queue->modulo);
+
+ if (queue->max_inflights < 2) {
+ PMD_DRV_LOG(ERR, "Invalid num inflights");
+ return -EINVAL;
+ }
+ queue->head = 0;
+ queue->tail = 0;
+ queue->msg_size = desc_size;
+
+ /*
+ * Write an unused pattern to the queue memory.
+ */
+ memset(queue->base_addr, 0x7F, queue_size_bytes);
+
+ queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr,
+ queue->queue_size);
+ pci_dev = RTE_DEV_TO_PCI(dev->device);
+
+ io_addr = pci_dev->mem_resource[0].addr;
+
+ WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number,
+ queue->hw_queue_number, queue_base);
+ return 0;
+}
+
+static int qat_qp_check_queue_alignment(uint64_t phys_addr,
+ uint32_t queue_size_bytes)
+{
+ PMD_INIT_FUNC_TRACE();
+ if (((queue_size_bytes - 1) & phys_addr) != 0)
+ return -EINVAL;
+ return 0;
+}
+
+static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
+ uint32_t *p_queue_size_for_csr)
+{
+ uint8_t i = ADF_MIN_RING_SIZE;
+
+ PMD_INIT_FUNC_TRACE();
+ for (; i <= ADF_MAX_RING_SIZE; i++)
+ if ((msg_size * msg_num) ==
+ (uint32_t)ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) {
+ *p_queue_size_for_csr = i;
+ return 0;
+ }
+ PMD_DRV_LOG(ERR, "Invalid ring size %d", msg_size * msg_num);
+ return -EINVAL;
+}
+
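+/*
+ * Enable/disable service arbitration for a TX ring by updating its bit in
+ * the bundle's RINGSRVARBEN register (disable XORs the bit, clearing it
+ * when it was previously set).
+ */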
+static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr)
+{
+ uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
+ (ADF_ARB_REG_SLOT *
+ txq->hw_bundle_number);
+ uint32_t value;
+
+ PMD_INIT_FUNC_TRACE();
+ value = ADF_CSR_RD(base_addr, arb_csr_offset);
+ value |= (0x01 << txq->hw_queue_number);
+ ADF_CSR_WR(base_addr, arb_csr_offset, value);
+}
+
+static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr)
+{
+ uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
+ (ADF_ARB_REG_SLOT *
+ txq->hw_bundle_number);
+ uint32_t value;
+
+ PMD_INIT_FUNC_TRACE();
+ value = ADF_CSR_RD(base_addr, arb_csr_offset);
+ value ^= (0x01 << txq->hw_queue_number);
+ ADF_CSR_WR(base_addr, arb_csr_offset, value);
+}
+
+static void adf_configure_queues(struct qat_qp *qp)
+{
+ uint32_t queue_config;
+ struct qat_queue *queue = &qp->tx_q;
+
+ PMD_INIT_FUNC_TRACE();
+ queue_config = BUILD_RING_CONFIG(queue->queue_size);
+
+ WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
+ queue->hw_queue_number, queue_config);
+
+ queue = &qp->rx_q;
+ queue_config =
+ BUILD_RESP_RING_CONFIG(queue->queue_size,
+ ADF_RING_NEAR_WATERMARK_512,
+ ADF_RING_NEAR_WATERMARK_0);
+
+ WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
+ queue->hw_queue_number, queue_config);
+}
diff --git a/src/seastar/dpdk/drivers/crypto/qat/rte_pmd_qat_version.map b/src/seastar/dpdk/drivers/crypto/qat/rte_pmd_qat_version.map
new file mode 100644
index 00000000..bbaf1c85
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/qat/rte_pmd_qat_version.map
@@ -0,0 +1,3 @@
+DPDK_2.2 {
+ local: *;
+}; \ No newline at end of file
diff --git a/src/seastar/dpdk/drivers/crypto/qat/rte_qat_cryptodev.c b/src/seastar/dpdk/drivers/crypto/qat/rte_qat_cryptodev.c
new file mode 100644
index 00000000..1bdd30da
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/qat/rte_qat_cryptodev.c
@@ -0,0 +1,163 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_common.h>
+#include <rte_dev.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "qat_crypto.h"
+#include "qat_logs.h"
+
+static const struct rte_cryptodev_capabilities qat_cpm16_capabilities[] = {
+ QAT_BASE_CPM16_SYM_CAPABILITIES,
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static const struct rte_cryptodev_capabilities qat_cpm17_capabilities[] = {
+ QAT_BASE_CPM16_SYM_CAPABILITIES,
+ QAT_EXTRA_CPM17_SYM_CAPABILITIES,
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static struct rte_cryptodev_ops crypto_qat_ops = {
+
+ /* Device related operations */
+ .dev_configure = qat_dev_config,
+ .dev_start = qat_dev_start,
+ .dev_stop = qat_dev_stop,
+ .dev_close = qat_dev_close,
+ .dev_infos_get = qat_dev_info_get,
+
+ .stats_get = qat_crypto_sym_stats_get,
+ .stats_reset = qat_crypto_sym_stats_reset,
+ .queue_pair_setup = qat_crypto_sym_qp_setup,
+ .queue_pair_release = qat_crypto_sym_qp_release,
+ .queue_pair_start = NULL,
+ .queue_pair_stop = NULL,
+ .queue_pair_count = NULL,
+
+ /* Crypto related operations */
+ .session_get_size = qat_crypto_sym_get_session_private_size,
+ .session_configure = qat_crypto_sym_configure_session,
+ .session_initialize = qat_crypto_sym_session_init,
+ .session_clear = qat_crypto_sym_clear_session
+};
+
+/*
+ * The set of PCI devices this driver supports
+ */
+
+static const struct rte_pci_id pci_id_qat_map[] = {
+ {
+ RTE_PCI_DEVICE(0x8086, 0x0443),
+ },
+ {
+ RTE_PCI_DEVICE(0x8086, 0x37c9),
+ },
+ {
+ RTE_PCI_DEVICE(0x8086, 0x19e3),
+ },
+ {
+ RTE_PCI_DEVICE(0x8086, 0x6f55),
+ },
+ {.device_id = 0},
+};
+
+static int
+crypto_qat_dev_init(__attribute__((unused)) struct rte_cryptodev_driver *crypto_drv,
+ struct rte_cryptodev *cryptodev)
+{
+ struct qat_pmd_private *internals;
+
+ PMD_INIT_FUNC_TRACE();
+ PMD_DRV_LOG(DEBUG, "Found crypto device at %02x:%02x.%x",
+ RTE_DEV_TO_PCI(cryptodev->device)->addr.bus,
+ RTE_DEV_TO_PCI(cryptodev->device)->addr.devid,
+ RTE_DEV_TO_PCI(cryptodev->device)->addr.function);
+
+ cryptodev->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
+ cryptodev->dev_ops = &crypto_qat_ops;
+
+ cryptodev->enqueue_burst = qat_pmd_enqueue_op_burst;
+ cryptodev->dequeue_burst = qat_pmd_dequeue_op_burst;
+
+ cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_HW_ACCELERATED |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
+
+ internals = cryptodev->data->dev_private;
+ internals->max_nb_sessions = RTE_QAT_PMD_MAX_NB_SESSIONS;
+ switch (RTE_DEV_TO_PCI(cryptodev->device)->id.device_id) {
+ case 0x0443:
+ internals->qat_dev_capabilities = qat_cpm16_capabilities;
+ break;
+ case 0x37c9:
+ case 0x19e3:
+ case 0x6f55:
+ internals->qat_dev_capabilities = qat_cpm17_capabilities;
+ break;
+ default:
+ PMD_DRV_LOG(ERR,
+ "Invalid dev_id, can't determine capabilities");
+ break;
+ }
+
+ /*
+ * For secondary processes, we don't initialise any further as primary
+ * has already done this work. Only check we don't need a different
+ * RX function
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ PMD_DRV_LOG(DEBUG, "Device already initialised by primary process");
+ return 0;
+ }
+
+ return 0;
+}
+
+static struct rte_cryptodev_driver rte_qat_pmd = {
+ .pci_drv = {
+ .id_table = pci_id_qat_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = rte_cryptodev_pci_probe,
+ .remove = rte_cryptodev_pci_remove,
+ },
+ .cryptodev_init = crypto_qat_dev_init,
+ .dev_private_size = sizeof(struct qat_pmd_private),
+};
+
+RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_QAT_SYM_PMD, rte_qat_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(CRYPTODEV_NAME_QAT_SYM_PMD, pci_id_qat_map);
+
diff --git a/src/seastar/dpdk/drivers/crypto/scheduler/Makefile b/src/seastar/dpdk/drivers/crypto/scheduler/Makefile
new file mode 100644
index 00000000..c273e784
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/scheduler/Makefile
@@ -0,0 +1,60 @@
+# BSD LICENSE
+#
+# Copyright(c) 2017 Intel Corporation. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_crypto_scheduler.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_crypto_scheduler_version.map
+
+#
+# Export include files
+#
+SYMLINK-y-include += rte_cryptodev_scheduler_operations.h
+SYMLINK-y-include += rte_cryptodev_scheduler.h
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_pmd_ops.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += rte_cryptodev_scheduler.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_roundrobin.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_pkt_size_distr.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_failover.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/seastar/dpdk/drivers/crypto/scheduler/rte_cryptodev_scheduler.c b/src/seastar/dpdk/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
new file mode 100644
index 00000000..319dcf0a
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
@@ -0,0 +1,593 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <rte_reorder.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_malloc.h>
+
+#include "rte_cryptodev_scheduler.h"
+#include "scheduler_pmd_private.h"
+
+/** Update the scheduler PMD's capabilities with an attaching device's
+ * capabilities.
+ * For each device to be attached, the scheduler's capabilities must be
+ * the common capability set of all attached slaves.
+ */
+static uint32_t
+sync_caps(struct rte_cryptodev_capabilities *caps,
+ uint32_t nb_caps,
+ const struct rte_cryptodev_capabilities *slave_caps)
+{
+ uint32_t sync_nb_caps = nb_caps, nb_slave_caps = 0;
+ uint32_t i;
+
+ while (slave_caps[nb_slave_caps].op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
+ nb_slave_caps++;
+
+ if (nb_caps == 0) {
+ rte_memcpy(caps, slave_caps, sizeof(*caps) * nb_slave_caps);
+ return nb_slave_caps;
+ }
+
+ for (i = 0; i < sync_nb_caps; i++) {
+ struct rte_cryptodev_capabilities *cap = &caps[i];
+ uint32_t j;
+
+ for (j = 0; j < nb_slave_caps; j++) {
+ const struct rte_cryptodev_capabilities *s_cap =
+ &slave_caps[j];
+
+ if (s_cap->op != cap->op || s_cap->sym.xform_type !=
+ cap->sym.xform_type)
+ continue;
+
+ if (s_cap->sym.xform_type ==
+ RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (s_cap->sym.auth.algo !=
+ cap->sym.auth.algo)
+ continue;
+
+ cap->sym.auth.digest_size.min =
+ s_cap->sym.auth.digest_size.min <
+ cap->sym.auth.digest_size.min ?
+ s_cap->sym.auth.digest_size.min :
+ cap->sym.auth.digest_size.min;
+ cap->sym.auth.digest_size.max =
+ s_cap->sym.auth.digest_size.max <
+ cap->sym.auth.digest_size.max ?
+ s_cap->sym.auth.digest_size.max :
+ cap->sym.auth.digest_size.max;
+
+ }
+
+ if (s_cap->sym.xform_type ==
+ RTE_CRYPTO_SYM_XFORM_CIPHER)
+ if (s_cap->sym.cipher.algo !=
+ cap->sym.cipher.algo)
+ continue;
+
+ /* no common cap found */
+ break;
+ }
+
+ if (j < nb_slave_caps)
+ continue;
+
+		/* remove an uncommon cap from the array */
+ for (j = i; j < sync_nb_caps - 1; j++)
+ rte_memcpy(&caps[j], &caps[j+1], sizeof(*cap));
+
+ memset(&caps[sync_nb_caps - 1], 0, sizeof(*cap));
+ sync_nb_caps--;
+ }
+
+ return sync_nb_caps;
+}
+
+static int
+update_scheduler_capability(struct scheduler_ctx *sched_ctx)
+{
+ struct rte_cryptodev_capabilities tmp_caps[256] = { {0} };
+ uint32_t nb_caps = 0, i;
+
+ if (sched_ctx->capabilities)
+ rte_free(sched_ctx->capabilities);
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ struct rte_cryptodev_info dev_info;
+
+ rte_cryptodev_info_get(sched_ctx->slaves[i].dev_id, &dev_info);
+
+ nb_caps = sync_caps(tmp_caps, nb_caps, dev_info.capabilities);
+ if (nb_caps == 0)
+ return -1;
+ }
+
+ sched_ctx->capabilities = rte_zmalloc_socket(NULL,
+ sizeof(struct rte_cryptodev_capabilities) *
+ (nb_caps + 1), 0, SOCKET_ID_ANY);
+ if (!sched_ctx->capabilities)
+ return -ENOMEM;
+
+ rte_memcpy(sched_ctx->capabilities, tmp_caps,
+ sizeof(struct rte_cryptodev_capabilities) * nb_caps);
+
+ return 0;
+}
+
+static void
+update_scheduler_feature_flag(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint32_t i;
+
+ dev->feature_flags = 0;
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ struct rte_cryptodev_info dev_info;
+
+ rte_cryptodev_info_get(sched_ctx->slaves[i].dev_id, &dev_info);
+
+ dev->feature_flags |= dev_info.feature_flags;
+ }
+}
+
+static void
+update_max_nb_qp(struct scheduler_ctx *sched_ctx)
+{
+ uint32_t i;
+ uint32_t max_nb_qp;
+
+ if (!sched_ctx->nb_slaves)
+ return;
+
+ max_nb_qp = sched_ctx->nb_slaves ? UINT32_MAX : 0;
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ struct rte_cryptodev_info dev_info;
+
+ rte_cryptodev_info_get(sched_ctx->slaves[i].dev_id, &dev_info);
+ max_nb_qp = dev_info.max_nb_queue_pairs < max_nb_qp ?
+ dev_info.max_nb_queue_pairs : max_nb_qp;
+ }
+
+ sched_ctx->max_nb_queue_pairs = max_nb_qp;
+}
+
+/** Attach a device to the scheduler. */
+int
+rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id)
+{
+ struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+ struct scheduler_ctx *sched_ctx;
+ struct scheduler_slave *slave;
+ struct rte_cryptodev_info dev_info;
+ uint32_t i;
+
+ if (!dev) {
+ CS_LOG_ERR("Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+ CS_LOG_ERR("Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->data->dev_started) {
+ CS_LOG_ERR("Illegal operation");
+ return -EBUSY;
+ }
+
+ sched_ctx = dev->data->dev_private;
+ if (sched_ctx->nb_slaves >=
+ RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES) {
+ CS_LOG_ERR("Too many slaves attached");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++)
+ if (sched_ctx->slaves[i].dev_id == slave_id) {
+ CS_LOG_ERR("Slave already added");
+ return -ENOTSUP;
+ }
+
+ slave = &sched_ctx->slaves[sched_ctx->nb_slaves];
+
+ rte_cryptodev_info_get(slave_id, &dev_info);
+
+ slave->dev_id = slave_id;
+ slave->dev_type = dev_info.dev_type;
+ sched_ctx->nb_slaves++;
+
+ if (update_scheduler_capability(sched_ctx) < 0) {
+ slave->dev_id = 0;
+ slave->dev_type = 0;
+ sched_ctx->nb_slaves--;
+
+ CS_LOG_ERR("capabilities update failed");
+ return -ENOTSUP;
+ }
+
+ update_scheduler_feature_flag(dev);
+
+ update_max_nb_qp(sched_ctx);
+
+ return 0;
+}
+
+int
+rte_cryptodev_scheduler_slave_detach(uint8_t scheduler_id, uint8_t slave_id)
+{
+ struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+ struct scheduler_ctx *sched_ctx;
+ uint32_t i, slave_pos;
+
+ if (!dev) {
+ CS_LOG_ERR("Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+ CS_LOG_ERR("Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->data->dev_started) {
+ CS_LOG_ERR("Illegal operation");
+ return -EBUSY;
+ }
+
+ sched_ctx = dev->data->dev_private;
+
+ for (slave_pos = 0; slave_pos < sched_ctx->nb_slaves; slave_pos++)
+ if (sched_ctx->slaves[slave_pos].dev_id == slave_id)
+ break;
+ if (slave_pos == sched_ctx->nb_slaves) {
+ CS_LOG_ERR("Cannot find slave");
+ return -ENOTSUP;
+ }
+
+ if (sched_ctx->ops.slave_detach(dev, slave_id) < 0) {
+ CS_LOG_ERR("Failed to detach slave");
+ return -ENOTSUP;
+ }
+
+ for (i = slave_pos; i < sched_ctx->nb_slaves - 1; i++) {
+ memcpy(&sched_ctx->slaves[i], &sched_ctx->slaves[i+1],
+ sizeof(struct scheduler_slave));
+ }
+ memset(&sched_ctx->slaves[sched_ctx->nb_slaves - 1], 0,
+ sizeof(struct scheduler_slave));
+ sched_ctx->nb_slaves--;
+
+ if (update_scheduler_capability(sched_ctx) < 0) {
+ CS_LOG_ERR("capabilities update failed");
+ return -ENOTSUP;
+ }
+
+ update_scheduler_feature_flag(dev);
+
+ update_max_nb_qp(sched_ctx);
+
+ return 0;
+}
+
+int
+rte_cryptodev_scheduler_mode_set(uint8_t scheduler_id,
+ enum rte_cryptodev_scheduler_mode mode)
+{
+ struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+ struct scheduler_ctx *sched_ctx;
+
+ if (!dev) {
+ CS_LOG_ERR("Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+ CS_LOG_ERR("Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->data->dev_started) {
+ CS_LOG_ERR("Illegal operation");
+ return -EBUSY;
+ }
+
+ sched_ctx = dev->data->dev_private;
+
+ if (mode == sched_ctx->mode)
+ return 0;
+
+ switch (mode) {
+ case CDEV_SCHED_MODE_ROUNDROBIN:
+ if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
+ roundrobin_scheduler) < 0) {
+ CS_LOG_ERR("Failed to load scheduler");
+ return -1;
+ }
+ break;
+ case CDEV_SCHED_MODE_PKT_SIZE_DISTR:
+ if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
+ pkt_size_based_distr_scheduler) < 0) {
+ CS_LOG_ERR("Failed to load scheduler");
+ return -1;
+ }
+ break;
+ case CDEV_SCHED_MODE_FAILOVER:
+ if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
+ failover_scheduler) < 0) {
+ CS_LOG_ERR("Failed to load scheduler");
+ return -1;
+ }
+ break;
+ default:
+ CS_LOG_ERR("Not yet supported");
+ return -ENOTSUP;
+ }
+
+ return 0;
+}
+
+int
+rte_crpytodev_scheduler_mode_set(uint8_t scheduler_id,
+ enum rte_cryptodev_scheduler_mode mode)
+{
+ return rte_cryptodev_scheduler_mode_set(scheduler_id, mode);
+}
+
+enum rte_cryptodev_scheduler_mode
+rte_cryptodev_scheduler_mode_get(uint8_t scheduler_id)
+{
+ struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+ struct scheduler_ctx *sched_ctx;
+
+ if (!dev) {
+ CS_LOG_ERR("Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+ CS_LOG_ERR("Operation not supported");
+ return -ENOTSUP;
+ }
+
+ sched_ctx = dev->data->dev_private;
+
+ return sched_ctx->mode;
+}
+
+enum rte_cryptodev_scheduler_mode
+rte_crpytodev_scheduler_mode_get(uint8_t scheduler_id)
+{
+ return rte_cryptodev_scheduler_mode_get(scheduler_id);
+}
+
+int
+rte_cryptodev_scheduler_ordering_set(uint8_t scheduler_id,
+ uint32_t enable_reorder)
+{
+ struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+ struct scheduler_ctx *sched_ctx;
+
+ if (!dev) {
+ CS_LOG_ERR("Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+ CS_LOG_ERR("Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->data->dev_started) {
+ CS_LOG_ERR("Illegal operation");
+ return -EBUSY;
+ }
+
+ sched_ctx = dev->data->dev_private;
+
+ sched_ctx->reordering_enabled = enable_reorder;
+
+ return 0;
+}
+
+int
+rte_cryptodev_scheduler_ordering_get(uint8_t scheduler_id)
+{
+ struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+ struct scheduler_ctx *sched_ctx;
+
+ if (!dev) {
+ CS_LOG_ERR("Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+ CS_LOG_ERR("Operation not supported");
+ return -ENOTSUP;
+ }
+
+ sched_ctx = dev->data->dev_private;
+
+ return (int)sched_ctx->reordering_enabled;
+}
+
+int
+rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
+ struct rte_cryptodev_scheduler *scheduler) {
+
+ struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+ struct scheduler_ctx *sched_ctx;
+
+ if (!dev) {
+ CS_LOG_ERR("Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+ CS_LOG_ERR("Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->data->dev_started) {
+ CS_LOG_ERR("Illegal operation");
+ return -EBUSY;
+ }
+
+ sched_ctx = dev->data->dev_private;
+
+ strncpy(sched_ctx->name, scheduler->name,
+ RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN);
+ strncpy(sched_ctx->description, scheduler->description,
+ RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN);
+
+ /* load scheduler instance operations functions */
+ sched_ctx->ops.config_queue_pair = scheduler->ops->config_queue_pair;
+ sched_ctx->ops.create_private_ctx = scheduler->ops->create_private_ctx;
+ sched_ctx->ops.scheduler_start = scheduler->ops->scheduler_start;
+ sched_ctx->ops.scheduler_stop = scheduler->ops->scheduler_stop;
+ sched_ctx->ops.slave_attach = scheduler->ops->slave_attach;
+ sched_ctx->ops.slave_detach = scheduler->ops->slave_detach;
+ sched_ctx->ops.option_set = scheduler->ops->option_set;
+ sched_ctx->ops.option_get = scheduler->ops->option_get;
+
+ if (sched_ctx->private_ctx)
+ rte_free(sched_ctx->private_ctx);
+
+ if (sched_ctx->ops.create_private_ctx) {
+ int ret = (*sched_ctx->ops.create_private_ctx)(dev);
+
+ if (ret < 0) {
+ CS_LOG_ERR("Unable to create scheduler private "
+ "context");
+ return ret;
+ }
+ }
+
+ sched_ctx->mode = scheduler->mode;
+
+ return 0;
+}
+
+int
+rte_cryptodev_scheduler_slaves_get(uint8_t scheduler_id, uint8_t *slaves)
+{
+ struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+ struct scheduler_ctx *sched_ctx;
+ uint32_t nb_slaves = 0;
+
+ if (!dev) {
+ CS_LOG_ERR("Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+ CS_LOG_ERR("Operation not supported");
+ return -ENOTSUP;
+ }
+
+ sched_ctx = dev->data->dev_private;
+
+ nb_slaves = sched_ctx->nb_slaves;
+
+ if (slaves && nb_slaves) {
+ uint32_t i;
+
+ for (i = 0; i < nb_slaves; i++)
+ slaves[i] = sched_ctx->slaves[i].dev_id;
+ }
+
+ return (int)nb_slaves;
+}
+
+int
+rte_cryptodev_scheduler_option_set(uint8_t scheduler_id,
+ enum rte_cryptodev_schedule_option_type option_type,
+ void *option)
+{
+ struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+ struct scheduler_ctx *sched_ctx;
+
+ if (option_type == CDEV_SCHED_OPTION_NOT_SET ||
+ option_type >= CDEV_SCHED_OPTION_COUNT) {
+ CS_LOG_ERR("Invalid option parameter");
+ return -EINVAL;
+ }
+
+ if (!option) {
+ CS_LOG_ERR("Invalid option parameter");
+ return -EINVAL;
+ }
+
+ if (dev->data->dev_started) {
+ CS_LOG_ERR("Illegal operation");
+ return -EBUSY;
+ }
+
+ sched_ctx = dev->data->dev_private;
+
+ RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.option_set, -ENOTSUP);
+
+ return (*sched_ctx->ops.option_set)(dev, option_type, option);
+}
+
+int
+rte_cryptodev_scheduler_option_get(uint8_t scheduler_id,
+ enum rte_cryptodev_schedule_option_type option_type,
+ void *option)
+{
+ struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+ struct scheduler_ctx *sched_ctx;
+
+ if (!dev) {
+ CS_LOG_ERR("Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (!option) {
+ CS_LOG_ERR("Invalid option parameter");
+ return -EINVAL;
+ }
+
+ if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+ CS_LOG_ERR("Operation not supported");
+ return -ENOTSUP;
+ }
+
+ sched_ctx = dev->data->dev_private;
+
+ RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.option_get, -ENOTSUP);
+
+ return (*sched_ctx->ops.option_get)(dev, option_type, option);
+}
diff --git a/src/seastar/dpdk/drivers/crypto/scheduler/rte_cryptodev_scheduler.h b/src/seastar/dpdk/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
new file mode 100644
index 00000000..2ba6e470
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
@@ -0,0 +1,334 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_CRYPTO_SCHEDULER_H
+#define _RTE_CRYPTO_SCHEDULER_H
+
+/**
+ * @file rte_cryptodev_scheduler.h
+ *
+ * RTE Cryptodev Scheduler Device
+ *
+ * The RTE Cryptodev Scheduler Device allows the aggregation of multiple (slave)
+ * Cryptodevs into a single logical crypto device, and schedules the crypto
+ * operations to the slaves according to the specified, supported mode of
+ * operation. This implementation supports 3 modes of operation: round robin,
+ * packet-size based distribution, and fail-over.
+ */
+
+#include <stdint.h>
+#include "rte_cryptodev_scheduler_operations.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** Maximum number of bonded devices per device */
+#ifndef RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES
+#define RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES (8)
+#endif
+
+/** Round-robin scheduling mode string */
+#define SCHEDULER_MODE_NAME_ROUND_ROBIN round-robin
+/** Packet-size based distribution scheduling mode string */
+#define SCHEDULER_MODE_NAME_PKT_SIZE_DISTR packet-size-distr
+/** Fail-over scheduling mode string */
+#define SCHEDULER_MODE_NAME_FAIL_OVER fail-over
+
+/**
+ * Crypto scheduler PMD operation modes
+ */
+enum rte_cryptodev_scheduler_mode {
+ CDEV_SCHED_MODE_NOT_SET = 0,
+ /** User defined mode */
+ CDEV_SCHED_MODE_USERDEFINED,
+ /** Round-robin mode */
+ CDEV_SCHED_MODE_ROUNDROBIN,
+ /** Packet-size based distribution mode */
+ CDEV_SCHED_MODE_PKT_SIZE_DISTR,
+ /** Fail-over mode */
+ CDEV_SCHED_MODE_FAILOVER,
+
+ CDEV_SCHED_MODE_COUNT /**< number of modes */
+};
+
+#define RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN (64)
+#define RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN (256)
+
+/**
+ * Crypto scheduler option types
+ */
+enum rte_cryptodev_schedule_option_type {
+ CDEV_SCHED_OPTION_NOT_SET = 0,
+ CDEV_SCHED_OPTION_THRESHOLD,
+
+ CDEV_SCHED_OPTION_COUNT
+};
+
+/**
+ * Threshold option structure
+ */
+struct rte_cryptodev_scheduler_threshold_option {
+ uint32_t threshold; /**< Threshold for packet-size mode */
+};
+
+struct rte_cryptodev_scheduler;
+
+/**
+ * Load a user defined scheduler
+ *
+ * @param scheduler_id
+ * The target scheduler device ID
+ * @param scheduler
+ * Pointer to the user defined scheduler
+ *
+ * @return
+ * - 0 if the scheduler is successfully loaded
+ * - -ENOTSUP if the operation is not supported.
+ * - -EBUSY if device is started.
+ */
+int
+rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
+ struct rte_cryptodev_scheduler *scheduler);
+
+/**
+ * Attach a crypto device to the scheduler
+ *
+ * @param scheduler_id
+ * The target scheduler device ID
+ * @param slave_id
+ * Crypto device ID to be attached
+ *
+ * @return
+ * - 0 if the slave is attached.
+ * - -ENOTSUP if the operation is not supported.
+ * - -EBUSY if device is started.
+ * - -ENOMEM if the scheduler's slave list is full.
+ */
+int
+rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id);
+
+/**
+ * Detach a crypto device from the scheduler
+ *
+ * @param scheduler_id
+ * The target scheduler device ID
+ * @param slave_id
+ * Crypto device ID to be detached
+ *
+ * @return
+ * - 0 if the slave is detached.
+ * - -ENOTSUP if the operation is not supported.
+ * - -EBUSY if device is started.
+ */
+int
+rte_cryptodev_scheduler_slave_detach(uint8_t scheduler_id, uint8_t slave_id);
+
+
+/**
+ * Set the scheduling mode
+ *
+ * @param scheduler_id
+ * The target scheduler device ID
+ * @param mode
+ * The scheduling mode
+ *
+ * @return
+ * - 0 if the mode is set.
+ * - -ENOTSUP if the operation is not supported.
+ * - -EBUSY if device is started.
+ */
+int
+rte_cryptodev_scheduler_mode_set(uint8_t scheduler_id,
+ enum rte_cryptodev_scheduler_mode mode);
+
+/**
+ * Get the current scheduling mode
+ *
+ * @param scheduler_id
+ * The target scheduler device ID
+ *
+ * @return mode
+ * - non-negative enumerate value: the scheduling mode
+ * - -ENOTSUP if the operation is not supported.
+ */
+enum rte_cryptodev_scheduler_mode
+rte_cryptodev_scheduler_mode_get(uint8_t scheduler_id);
+
+/**
+ * @deprecated
+ * Set the scheduling mode
+ *
+ * @param scheduler_id
+ * The target scheduler device ID
+ * @param mode
+ * The scheduling mode
+ *
+ * @return
+ *   0 if the mode is set successfully, negative integer otherwise.
+ */
+__rte_deprecated
+int
+rte_crpytodev_scheduler_mode_set(uint8_t scheduler_id,
+ enum rte_cryptodev_scheduler_mode mode);
+
+/**
+ * @deprecated
+ * Get the current scheduling mode
+ *
+ * @param scheduler_id
+ * The target scheduler device ID
+ *
+ * @return
+ * If successful, returns the scheduling mode, negative integer
+ * otherwise
+ */
+__rte_deprecated
+enum rte_cryptodev_scheduler_mode
+rte_crpytodev_scheduler_mode_get(uint8_t scheduler_id);
+
+/**
+ * Set the crypto ops reordering feature on/off
+ *
+ * @param scheduler_id
+ * The target scheduler device ID
+ * @param enable_reorder
+ * Set the crypto op reordering feature
+ * - 0: disable reordering
+ * - 1: enable reordering
+ *
+ * @return
+ * - 0 if the ordering is set.
+ * - -ENOTSUP if the operation is not supported.
+ * - -EBUSY if device is started.
+ */
+int
+rte_cryptodev_scheduler_ordering_set(uint8_t scheduler_id,
+ uint32_t enable_reorder);
+
+/**
+ * Get the current crypto ops reordering feature
+ *
+ * @param scheduler_id
+ * The target scheduler device ID
+ *
+ * @return
+ * - 0 if reordering is disabled
+ * - 1 if reordering is enabled
+ * - -ENOTSUP if the operation is not supported.
+ */
+int
+rte_cryptodev_scheduler_ordering_get(uint8_t scheduler_id);
+
+/**
+ * Get the attached slaves' count and/or IDs
+ *
+ * @param scheduler_id
+ * The target scheduler device ID
+ * @param slaves
+ * If successful, the function will write back all slaves' device IDs to it.
+ *   This parameter will either be a uint8_t array of
+ * RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES elements or NULL.
+ *
+ * @return
+ * - non-negative number: the number of slaves attached
+ * - -ENOTSUP if the operation is not supported.
+ */
+int
+rte_cryptodev_scheduler_slaves_get(uint8_t scheduler_id, uint8_t *slaves);
+
+/**
+ * Set the mode specific option
+ *
+ * @param scheduler_id
+ * The target scheduler device ID
+ * @param option_type
+ * The option type enumerate
+ * @param option
+ * The specific mode's option structure
+ *
+ * @return
+ * - 0 if successful
+ * - negative integer if otherwise.
+ */
+int
+rte_cryptodev_scheduler_option_set(uint8_t scheduler_id,
+ enum rte_cryptodev_schedule_option_type option_type,
+ void *option);
+
+/**
+ * Get the mode specific option
+ *
+ * @param scheduler_id
+ * The target scheduler device ID
+ * @param option_type
+ * The option type enumerate
+ * @param option
+ *   If successful, the function will write back the current value of the
+ *   specified option to it.
+ *
+ * @return
+ * - 0 if successful
+ * - negative integer if otherwise.
+ */
+int
+rte_cryptodev_scheduler_option_get(uint8_t scheduler_id,
+ enum rte_cryptodev_schedule_option_type option_type,
+ void *option);
+
+typedef uint16_t (*rte_cryptodev_scheduler_burst_enqueue_t)(void *qp_ctx,
+ struct rte_crypto_op **ops, uint16_t nb_ops);
+
+typedef uint16_t (*rte_cryptodev_scheduler_burst_dequeue_t)(void *qp_ctx,
+ struct rte_crypto_op **ops, uint16_t nb_ops);
+
+/** The data structure associated with each mode of scheduler. */
+struct rte_cryptodev_scheduler {
+ const char *name; /**< Scheduler name */
+ const char *description; /**< Scheduler description */
+ enum rte_cryptodev_scheduler_mode mode; /**< Scheduling mode */
+
+ /** Pointer to scheduler operation structure */
+ struct rte_cryptodev_scheduler_ops *ops;
+};
+
+/** Round-robin mode scheduler */
+extern struct rte_cryptodev_scheduler *roundrobin_scheduler;
+/** Packet-size based distribution mode scheduler */
+extern struct rte_cryptodev_scheduler *pkt_size_based_distr_scheduler;
+/** Fail-over mode scheduler */
+extern struct rte_cryptodev_scheduler *failover_scheduler;
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _RTE_CRYPTO_SCHEDULER_H */
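
For reference, a minimal usage sketch of the API declared in this header, assuming a scheduler cryptodev and two slave cryptodevs have already been created and their device IDs are known (the function name and IDs below are hypothetical, not part of the source):

#include <rte_cryptodev_scheduler.h>

static int
setup_scheduler(uint8_t scheduler_id, uint8_t slave0_id, uint8_t slave1_id)
{
	int ret;

	/* attach both slaves before the scheduler device is started */
	ret = rte_cryptodev_scheduler_slave_attach(scheduler_id, slave0_id);
	if (ret < 0)
		return ret;
	ret = rte_cryptodev_scheduler_slave_attach(scheduler_id, slave1_id);
	if (ret < 0)
		return ret;

	/* pick one of the built-in modes, e.g. fail-over */
	ret = rte_cryptodev_scheduler_mode_set(scheduler_id,
			CDEV_SCHED_MODE_FAILOVER);
	if (ret < 0)
		return ret;

	/* optionally keep crypto ops in order across the slaves */
	return rte_cryptodev_scheduler_ordering_set(scheduler_id, 1);
}

All of these calls return -EBUSY once the scheduler device has been started, so the setup must happen before rte_cryptodev_start() is called on it.
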
diff --git a/src/seastar/dpdk/drivers/crypto/scheduler/rte_cryptodev_scheduler_operations.h b/src/seastar/dpdk/drivers/crypto/scheduler/rte_cryptodev_scheduler_operations.h
new file mode 100644
index 00000000..719165c3
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/scheduler/rte_cryptodev_scheduler_operations.h
@@ -0,0 +1,85 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_CRYPTO_SCHEDULER_OPERATIONS_H
+#define _RTE_CRYPTO_SCHEDULER_OPERATIONS_H
+
+#include <rte_cryptodev.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef int (*rte_cryptodev_scheduler_slave_attach_t)(
+ struct rte_cryptodev *dev, uint8_t slave_id);
+typedef int (*rte_cryptodev_scheduler_slave_detach_t)(
+ struct rte_cryptodev *dev, uint8_t slave_id);
+
+typedef int (*rte_cryptodev_scheduler_start_t)(struct rte_cryptodev *dev);
+typedef int (*rte_cryptodev_scheduler_stop_t)(struct rte_cryptodev *dev);
+
+typedef int (*rte_cryptodev_scheduler_config_queue_pair)(
+ struct rte_cryptodev *dev, uint16_t qp_id);
+
+typedef int (*rte_cryptodev_scheduler_create_private_ctx)(
+ struct rte_cryptodev *dev);
+
+typedef int (*rte_cryptodev_scheduler_config_option_set)(
+ struct rte_cryptodev *dev,
+ uint32_t option_type,
+ void *option);
+
+typedef int (*rte_cryptodev_scheduler_config_option_get)(
+ struct rte_cryptodev *dev,
+ uint32_t option_type,
+ void *option);
+
+struct rte_cryptodev_scheduler_ops {
+ rte_cryptodev_scheduler_slave_attach_t slave_attach;
+ rte_cryptodev_scheduler_slave_attach_t slave_detach;
+
+ rte_cryptodev_scheduler_start_t scheduler_start;
+ rte_cryptodev_scheduler_stop_t scheduler_stop;
+
+ rte_cryptodev_scheduler_config_queue_pair config_queue_pair;
+
+ rte_cryptodev_scheduler_create_private_ctx create_private_ctx;
+
+ rte_cryptodev_scheduler_config_option_set option_set;
+ rte_cryptodev_scheduler_config_option_get option_get;
+};
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _RTE_CRYPTO_SCHEDULER_OPERATIONS_H */
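
A user-defined scheduler fills in the ops structure above and wraps it in an rte_cryptodev_scheduler, which can then be installed with rte_cryptodev_scheduler_load_user_scheduler(). A minimal skeleton is sketched below; the names and empty callback bodies are placeholders for illustration only, not part of the source:

#include <rte_cryptodev_scheduler.h>

static int my_slave_attach(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint8_t slave_id) { return 0; }
static int my_slave_detach(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint8_t slave_id) { return 0; }
static int my_start(__rte_unused struct rte_cryptodev *dev) { return 0; }
static int my_stop(__rte_unused struct rte_cryptodev *dev) { return 0; }
static int my_config_qp(__rte_unused struct rte_cryptodev *dev,
		__rte_unused uint16_t qp_id) { return 0; }

static struct rte_cryptodev_scheduler_ops my_ops = {
	my_slave_attach,
	my_slave_detach,
	my_start,
	my_stop,
	my_config_qp,
	NULL,		/* create_private_ctx */
	NULL,		/* option_set */
	NULL		/* option_get */
};

static struct rte_cryptodev_scheduler my_scheduler = {
	.name = "my-scheduler",
	.description = "example user-defined scheduler",
	.mode = CDEV_SCHED_MODE_USERDEFINED,
	.ops = &my_ops
};

The built-in fail-over and packet-size schedulers added later in this patch follow exactly this pattern.
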
diff --git a/src/seastar/dpdk/drivers/crypto/scheduler/rte_pmd_crypto_scheduler_version.map b/src/seastar/dpdk/drivers/crypto/scheduler/rte_pmd_crypto_scheduler_version.map
new file mode 100644
index 00000000..0a8b4716
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/scheduler/rte_pmd_crypto_scheduler_version.map
@@ -0,0 +1,23 @@
+DPDK_17.02 {
+ global:
+
+ rte_cryptodev_scheduler_load_user_scheduler;
+ rte_cryptodev_scheduler_slave_attach;
+ rte_cryptodev_scheduler_slave_detach;
+ rte_crpytodev_scheduler_mode_set;
+ rte_crpytodev_scheduler_mode_get;
+ rte_cryptodev_scheduler_ordering_set;
+ rte_cryptodev_scheduler_ordering_get;
+
+};
+
+DPDK_17.05 {
+ global:
+
+ rte_cryptodev_scheduler_mode_get;
+ rte_cryptodev_scheduler_mode_set;
+ rte_cryptodev_scheduler_option_get;
+ rte_cryptodev_scheduler_option_set;
+ rte_cryptodev_scheduler_slaves_get;
+
+} DPDK_17.02;
diff --git a/src/seastar/dpdk/drivers/crypto/scheduler/scheduler_failover.c b/src/seastar/dpdk/drivers/crypto/scheduler/scheduler_failover.c
new file mode 100644
index 00000000..2471a5f1
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/scheduler/scheduler_failover.c
@@ -0,0 +1,287 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_cryptodev.h>
+#include <rte_malloc.h>
+
+#include "rte_cryptodev_scheduler_operations.h"
+#include "scheduler_pmd_private.h"
+
+#define PRIMARY_SLAVE_IDX 0
+#define SECONDARY_SLAVE_IDX 1
+#define NB_FAILOVER_SLAVES 2
+#define SLAVE_SWITCH_MASK (0x01)
+
+struct fo_scheduler_qp_ctx {
+ struct scheduler_slave primary_slave;
+ struct scheduler_slave secondary_slave;
+
+ uint8_t deq_idx;
+};
+
+static inline uint16_t __attribute__((always_inline))
+failover_slave_enqueue(struct scheduler_slave *slave, uint8_t slave_idx,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ uint16_t i, processed_ops;
+ struct rte_cryptodev_sym_session *sessions[nb_ops];
+ struct scheduler_session *sess0, *sess1, *sess2, *sess3;
+
+ for (i = 0; i < nb_ops && i < 4; i++)
+ rte_prefetch0(ops[i]->sym->session);
+
+ for (i = 0; (i < (nb_ops - 8)) && (nb_ops > 8); i += 4) {
+ rte_prefetch0(ops[i + 4]->sym->session);
+ rte_prefetch0(ops[i + 5]->sym->session);
+ rte_prefetch0(ops[i + 6]->sym->session);
+ rte_prefetch0(ops[i + 7]->sym->session);
+
+ sess0 = (struct scheduler_session *)
+ ops[i]->sym->session->_private;
+ sess1 = (struct scheduler_session *)
+ ops[i+1]->sym->session->_private;
+ sess2 = (struct scheduler_session *)
+ ops[i+2]->sym->session->_private;
+ sess3 = (struct scheduler_session *)
+ ops[i+3]->sym->session->_private;
+
+ sessions[i] = ops[i]->sym->session;
+ sessions[i + 1] = ops[i + 1]->sym->session;
+ sessions[i + 2] = ops[i + 2]->sym->session;
+ sessions[i + 3] = ops[i + 3]->sym->session;
+
+ ops[i]->sym->session = sess0->sessions[slave_idx];
+ ops[i + 1]->sym->session = sess1->sessions[slave_idx];
+ ops[i + 2]->sym->session = sess2->sessions[slave_idx];
+ ops[i + 3]->sym->session = sess3->sessions[slave_idx];
+ }
+
+ for (; i < nb_ops; i++) {
+ sess0 = (struct scheduler_session *)
+ ops[i]->sym->session->_private;
+ sessions[i] = ops[i]->sym->session;
+ ops[i]->sym->session = sess0->sessions[slave_idx];
+ }
+
+ processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
+ slave->qp_id, ops, nb_ops);
+ slave->nb_inflight_cops += processed_ops;
+
+ if (unlikely(processed_ops < nb_ops))
+ for (i = processed_ops; i < nb_ops; i++)
+ ops[i]->sym->session = sessions[i];
+
+ return processed_ops;
+}
+
+static uint16_t
+schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct fo_scheduler_qp_ctx *qp_ctx =
+ ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
+ uint16_t enqueued_ops;
+
+ if (unlikely(nb_ops == 0))
+ return 0;
+
+ enqueued_ops = failover_slave_enqueue(&qp_ctx->primary_slave,
+ PRIMARY_SLAVE_IDX, ops, nb_ops);
+
+ if (enqueued_ops < nb_ops)
+ enqueued_ops += failover_slave_enqueue(&qp_ctx->secondary_slave,
+ SECONDARY_SLAVE_IDX, &ops[enqueued_ops],
+ nb_ops - enqueued_ops);
+
+ return enqueued_ops;
+}
+
+
+static uint16_t
+schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_ring *order_ring =
+ ((struct scheduler_qp_ctx *)qp)->order_ring;
+ uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
+ nb_ops);
+ uint16_t nb_ops_enqd = schedule_enqueue(qp, ops,
+ nb_ops_to_enq);
+
+ scheduler_order_insert(order_ring, ops, nb_ops_enqd);
+
+ return nb_ops_enqd;
+}
+
+static uint16_t
+schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct fo_scheduler_qp_ctx *qp_ctx =
+ ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
+ struct scheduler_slave *slaves[NB_FAILOVER_SLAVES] = {
+ &qp_ctx->primary_slave, &qp_ctx->secondary_slave};
+ struct scheduler_slave *slave = slaves[qp_ctx->deq_idx];
+ uint16_t nb_deq_ops = 0, nb_deq_ops2 = 0;
+
+ if (slave->nb_inflight_cops) {
+ nb_deq_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
+ slave->qp_id, ops, nb_ops);
+ slave->nb_inflight_cops -= nb_deq_ops;
+ }
+
+ qp_ctx->deq_idx = (~qp_ctx->deq_idx) & SLAVE_SWITCH_MASK;
+
+ if (nb_deq_ops == nb_ops)
+ return nb_deq_ops;
+
+ slave = slaves[qp_ctx->deq_idx];
+
+ if (slave->nb_inflight_cops) {
+ nb_deq_ops2 = rte_cryptodev_dequeue_burst(slave->dev_id,
+ slave->qp_id, &ops[nb_deq_ops], nb_ops - nb_deq_ops);
+ slave->nb_inflight_cops -= nb_deq_ops2;
+ }
+
+ return nb_deq_ops + nb_deq_ops2;
+}
+
+static uint16_t
+schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_ring *order_ring =
+ ((struct scheduler_qp_ctx *)qp)->order_ring;
+
+ schedule_dequeue(qp, ops, nb_ops);
+
+ return scheduler_order_drain(order_ring, ops, nb_ops);
+}
+
+static int
+slave_attach(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint8_t slave_id)
+{
+ return 0;
+}
+
+static int
+slave_detach(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint8_t slave_id)
+{
+ return 0;
+}
+
+static int
+scheduler_start(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint16_t i;
+
+ if (sched_ctx->nb_slaves < 2) {
+		CS_LOG_ERR("Number of slaves shall be no less than 2");
+ return -ENOMEM;
+ }
+
+ if (sched_ctx->reordering_enabled) {
+ dev->enqueue_burst = schedule_enqueue_ordering;
+ dev->dequeue_burst = schedule_dequeue_ordering;
+ } else {
+ dev->enqueue_burst = schedule_enqueue;
+ dev->dequeue_burst = schedule_dequeue;
+ }
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ struct fo_scheduler_qp_ctx *qp_ctx =
+ ((struct scheduler_qp_ctx *)
+ dev->data->queue_pairs[i])->private_qp_ctx;
+
+ rte_memcpy(&qp_ctx->primary_slave,
+ &sched_ctx->slaves[PRIMARY_SLAVE_IDX],
+ sizeof(struct scheduler_slave));
+ rte_memcpy(&qp_ctx->secondary_slave,
+ &sched_ctx->slaves[SECONDARY_SLAVE_IDX],
+ sizeof(struct scheduler_slave));
+ }
+
+ return 0;
+}
+
+static int
+scheduler_stop(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+static int
+scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
+ struct fo_scheduler_qp_ctx *fo_qp_ctx;
+
+ fo_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*fo_qp_ctx), 0,
+ rte_socket_id());
+ if (!fo_qp_ctx) {
+		CS_LOG_ERR("failed to allocate memory for private queue pair");
+ return -ENOMEM;
+ }
+
+ qp_ctx->private_qp_ctx = (void *)fo_qp_ctx;
+
+ return 0;
+}
+
+static int
+scheduler_create_private_ctx(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+struct rte_cryptodev_scheduler_ops scheduler_fo_ops = {
+ slave_attach,
+ slave_detach,
+ scheduler_start,
+ scheduler_stop,
+ scheduler_config_qp,
+ scheduler_create_private_ctx,
+ NULL, /* option_set */
+ NULL /*option_get */
+};
+
+struct rte_cryptodev_scheduler fo_scheduler = {
+ .name = "failover-scheduler",
+	.description = "scheduler which enqueues to the primary slave, "
+			"and enqueues to the secondary slave only when "
+			"enqueuing to the primary fails",
+ .mode = CDEV_SCHED_MODE_FAILOVER,
+ .ops = &scheduler_fo_ops
+};
+
+struct rte_cryptodev_scheduler *failover_scheduler = &fo_scheduler;
diff --git a/src/seastar/dpdk/drivers/crypto/scheduler/scheduler_pkt_size_distr.c b/src/seastar/dpdk/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
new file mode 100644
index 00000000..6b628dfa
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
@@ -0,0 +1,464 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_cryptodev.h>
+#include <rte_malloc.h>
+
+#include "rte_cryptodev_scheduler_operations.h"
+#include "scheduler_pmd_private.h"
+
+#define DEF_PKT_SIZE_THRESHOLD (0xffffff80)
+#define SLAVE_IDX_SWITCH_MASK (0x01)
+#define PRIMARY_SLAVE_IDX 0
+#define SECONDARY_SLAVE_IDX 1
+#define NB_PKT_SIZE_SLAVES 2
+
+/** pkt size based scheduler context */
+struct psd_scheduler_ctx {
+ uint32_t threshold;
+};
+
+/** pkt size based scheduler queue pair context */
+struct psd_scheduler_qp_ctx {
+ struct scheduler_slave primary_slave;
+ struct scheduler_slave secondary_slave;
+ uint32_t threshold;
+ uint8_t deq_idx;
+} __rte_cache_aligned;
+
+/** scheduling operation variables' wrapping */
+struct psd_schedule_op {
+ uint8_t slave_idx;
+ uint16_t pos;
+};
+
+static uint16_t
+schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct scheduler_qp_ctx *qp_ctx = qp;
+ struct psd_scheduler_qp_ctx *psd_qp_ctx = qp_ctx->private_qp_ctx;
+ struct rte_crypto_op *sched_ops[NB_PKT_SIZE_SLAVES][nb_ops];
+ struct scheduler_session *sess;
+ uint32_t in_flight_ops[NB_PKT_SIZE_SLAVES] = {
+ psd_qp_ctx->primary_slave.nb_inflight_cops,
+ psd_qp_ctx->secondary_slave.nb_inflight_cops
+ };
+ struct psd_schedule_op enq_ops[NB_PKT_SIZE_SLAVES] = {
+ {PRIMARY_SLAVE_IDX, 0}, {SECONDARY_SLAVE_IDX, 0}
+ };
+ struct psd_schedule_op *p_enq_op;
+ uint16_t i, processed_ops_pri = 0, processed_ops_sec = 0;
+ uint32_t job_len;
+
+ if (unlikely(nb_ops == 0))
+ return 0;
+
+ for (i = 0; i < nb_ops && i < 4; i++) {
+ rte_prefetch0(ops[i]->sym);
+ rte_prefetch0(ops[i]->sym->session);
+ }
+
+ for (i = 0; (i < (nb_ops - 8)) && (nb_ops > 8); i += 4) {
+ rte_prefetch0(ops[i + 4]->sym);
+ rte_prefetch0(ops[i + 4]->sym->session);
+ rte_prefetch0(ops[i + 5]->sym);
+ rte_prefetch0(ops[i + 5]->sym->session);
+ rte_prefetch0(ops[i + 6]->sym);
+ rte_prefetch0(ops[i + 6]->sym->session);
+ rte_prefetch0(ops[i + 7]->sym);
+ rte_prefetch0(ops[i + 7]->sym->session);
+
+ sess = (struct scheduler_session *)
+ ops[i]->sym->session->_private;
+		/* job_len is initialized to the cipher data length; when
+		 * that is 0, the auth data length is used instead
+		 */
+ job_len = ops[i]->sym->cipher.data.length;
+ job_len += (ops[i]->sym->cipher.data.length == 0) *
+ ops[i]->sym->auth.data.length;
+ /* decide the target op based on the job length */
+ p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
+
+		/* stop scheduling cops before the queue is full; this
+		 * prevents failed enqueues
+		 */
+ if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
+ qp_ctx->max_nb_objs) {
+ i = nb_ops;
+ break;
+ }
+
+ sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i];
+ ops[i]->sym->session = sess->sessions[p_enq_op->slave_idx];
+ p_enq_op->pos++;
+
+ sess = (struct scheduler_session *)
+ ops[i+1]->sym->session->_private;
+ job_len = ops[i+1]->sym->cipher.data.length;
+ job_len += (ops[i+1]->sym->cipher.data.length == 0) *
+ ops[i+1]->sym->auth.data.length;
+ p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
+
+ if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
+ qp_ctx->max_nb_objs) {
+ i = nb_ops;
+ break;
+ }
+
+ sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+1];
+ ops[i+1]->sym->session = sess->sessions[p_enq_op->slave_idx];
+ p_enq_op->pos++;
+
+ sess = (struct scheduler_session *)
+ ops[i+2]->sym->session->_private;
+ job_len = ops[i+2]->sym->cipher.data.length;
+ job_len += (ops[i+2]->sym->cipher.data.length == 0) *
+ ops[i+2]->sym->auth.data.length;
+ p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
+
+ if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
+ qp_ctx->max_nb_objs) {
+ i = nb_ops;
+ break;
+ }
+
+ sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+2];
+ ops[i+2]->sym->session = sess->sessions[p_enq_op->slave_idx];
+ p_enq_op->pos++;
+
+ sess = (struct scheduler_session *)
+ ops[i+3]->sym->session->_private;
+
+ job_len = ops[i+3]->sym->cipher.data.length;
+ job_len += (ops[i+3]->sym->cipher.data.length == 0) *
+ ops[i+3]->sym->auth.data.length;
+ p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
+
+ if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
+ qp_ctx->max_nb_objs) {
+ i = nb_ops;
+ break;
+ }
+
+ sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i+3];
+ ops[i+3]->sym->session = sess->sessions[p_enq_op->slave_idx];
+ p_enq_op->pos++;
+ }
+
+ for (; i < nb_ops; i++) {
+ sess = (struct scheduler_session *)
+ ops[i]->sym->session->_private;
+
+ job_len = ops[i]->sym->cipher.data.length;
+ job_len += (ops[i]->sym->cipher.data.length == 0) *
+ ops[i]->sym->auth.data.length;
+ p_enq_op = &enq_ops[!(job_len & psd_qp_ctx->threshold)];
+
+ if (p_enq_op->pos + in_flight_ops[p_enq_op->slave_idx] ==
+ qp_ctx->max_nb_objs) {
+ i = nb_ops;
+ break;
+ }
+
+ sched_ops[p_enq_op->slave_idx][p_enq_op->pos] = ops[i];
+ ops[i]->sym->session = sess->sessions[p_enq_op->slave_idx];
+ p_enq_op->pos++;
+ }
+
+ processed_ops_pri = rte_cryptodev_enqueue_burst(
+ psd_qp_ctx->primary_slave.dev_id,
+ psd_qp_ctx->primary_slave.qp_id,
+ sched_ops[PRIMARY_SLAVE_IDX],
+ enq_ops[PRIMARY_SLAVE_IDX].pos);
+ /* enqueue shall not fail as the slave queue is monitored */
+ RTE_ASSERT(processed_ops_pri == enq_ops[PRIMARY_SLAVE_IDX].pos);
+
+ psd_qp_ctx->primary_slave.nb_inflight_cops += processed_ops_pri;
+
+ processed_ops_sec = rte_cryptodev_enqueue_burst(
+ psd_qp_ctx->secondary_slave.dev_id,
+ psd_qp_ctx->secondary_slave.qp_id,
+ sched_ops[SECONDARY_SLAVE_IDX],
+ enq_ops[SECONDARY_SLAVE_IDX].pos);
+ RTE_ASSERT(processed_ops_sec == enq_ops[SECONDARY_SLAVE_IDX].pos);
+
+ psd_qp_ctx->secondary_slave.nb_inflight_cops += processed_ops_sec;
+
+ return processed_ops_pri + processed_ops_sec;
+}
+
+static uint16_t
+schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_ring *order_ring =
+ ((struct scheduler_qp_ctx *)qp)->order_ring;
+ uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
+ nb_ops);
+ uint16_t nb_ops_enqd = schedule_enqueue(qp, ops,
+ nb_ops_to_enq);
+
+ scheduler_order_insert(order_ring, ops, nb_ops_enqd);
+
+ return nb_ops_enqd;
+}
+
+static uint16_t
+schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct psd_scheduler_qp_ctx *qp_ctx =
+ ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
+ struct scheduler_slave *slaves[NB_PKT_SIZE_SLAVES] = {
+ &qp_ctx->primary_slave, &qp_ctx->secondary_slave};
+ struct scheduler_slave *slave = slaves[qp_ctx->deq_idx];
+ uint16_t nb_deq_ops_pri = 0, nb_deq_ops_sec = 0;
+
+ if (slave->nb_inflight_cops) {
+ nb_deq_ops_pri = rte_cryptodev_dequeue_burst(slave->dev_id,
+ slave->qp_id, ops, nb_ops);
+ slave->nb_inflight_cops -= nb_deq_ops_pri;
+ }
+
+ qp_ctx->deq_idx = (~qp_ctx->deq_idx) & SLAVE_IDX_SWITCH_MASK;
+
+ if (nb_deq_ops_pri == nb_ops)
+ return nb_deq_ops_pri;
+
+ slave = slaves[qp_ctx->deq_idx];
+
+ if (slave->nb_inflight_cops) {
+ nb_deq_ops_sec = rte_cryptodev_dequeue_burst(slave->dev_id,
+ slave->qp_id, &ops[nb_deq_ops_pri],
+ nb_ops - nb_deq_ops_pri);
+ slave->nb_inflight_cops -= nb_deq_ops_sec;
+
+ if (!slave->nb_inflight_cops)
+ qp_ctx->deq_idx = (~qp_ctx->deq_idx) &
+ SLAVE_IDX_SWITCH_MASK;
+ }
+
+ return nb_deq_ops_pri + nb_deq_ops_sec;
+}
+
+static uint16_t
+schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_ring *order_ring =
+ ((struct scheduler_qp_ctx *)qp)->order_ring;
+
+ schedule_dequeue(qp, ops, nb_ops);
+
+ return scheduler_order_drain(order_ring, ops, nb_ops);
+}
+
+static int
+slave_attach(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint8_t slave_id)
+{
+ return 0;
+}
+
+static int
+slave_detach(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint8_t slave_id)
+{
+ return 0;
+}
+
+static int
+scheduler_start(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ struct psd_scheduler_ctx *psd_ctx = sched_ctx->private_ctx;
+ uint16_t i;
+
+	/* for the packet size based scheduler, nb_slaves must be >= 2 */
+ if (sched_ctx->nb_slaves < NB_PKT_SIZE_SLAVES) {
+ CS_LOG_ERR("not enough slaves to start");
+ return -1;
+ }
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
+ struct psd_scheduler_qp_ctx *ps_qp_ctx =
+ qp_ctx->private_qp_ctx;
+
+ ps_qp_ctx->primary_slave.dev_id =
+ sched_ctx->slaves[PRIMARY_SLAVE_IDX].dev_id;
+ ps_qp_ctx->primary_slave.qp_id = i;
+ ps_qp_ctx->primary_slave.nb_inflight_cops = 0;
+
+ ps_qp_ctx->secondary_slave.dev_id =
+ sched_ctx->slaves[SECONDARY_SLAVE_IDX].dev_id;
+ ps_qp_ctx->secondary_slave.qp_id = i;
+ ps_qp_ctx->secondary_slave.nb_inflight_cops = 0;
+
+ ps_qp_ctx->threshold = psd_ctx->threshold;
+ }
+
+ if (sched_ctx->reordering_enabled) {
+ dev->enqueue_burst = &schedule_enqueue_ordering;
+ dev->dequeue_burst = &schedule_dequeue_ordering;
+ } else {
+ dev->enqueue_burst = &schedule_enqueue;
+ dev->dequeue_burst = &schedule_dequeue;
+ }
+
+ return 0;
+}
+
+static int
+scheduler_stop(struct rte_cryptodev *dev)
+{
+ uint16_t i;
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
+ struct psd_scheduler_qp_ctx *ps_qp_ctx = qp_ctx->private_qp_ctx;
+
+ if (ps_qp_ctx->primary_slave.nb_inflight_cops +
+ ps_qp_ctx->secondary_slave.nb_inflight_cops) {
+ CS_LOG_ERR("Some crypto ops left in slave queue");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int
+scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
+ struct psd_scheduler_qp_ctx *ps_qp_ctx;
+
+ ps_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*ps_qp_ctx), 0,
+ rte_socket_id());
+ if (!ps_qp_ctx) {
+		CS_LOG_ERR("failed to allocate memory for private queue pair");
+ return -ENOMEM;
+ }
+
+ qp_ctx->private_qp_ctx = (void *)ps_qp_ctx;
+
+ return 0;
+}
+
+static int
+scheduler_create_private_ctx(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ struct psd_scheduler_ctx *psd_ctx;
+
+ if (sched_ctx->private_ctx)
+ rte_free(sched_ctx->private_ctx);
+
+ psd_ctx = rte_zmalloc_socket(NULL, sizeof(struct psd_scheduler_ctx), 0,
+ rte_socket_id());
+ if (!psd_ctx) {
+		CS_LOG_ERR("failed to allocate memory");
+ return -ENOMEM;
+ }
+
+ psd_ctx->threshold = DEF_PKT_SIZE_THRESHOLD;
+
+ sched_ctx->private_ctx = (void *)psd_ctx;
+
+ return 0;
+}
+
+static int
+scheduler_option_set(struct rte_cryptodev *dev, uint32_t option_type,
+ void *option)
+{
+ struct psd_scheduler_ctx *psd_ctx = ((struct scheduler_ctx *)
+ dev->data->dev_private)->private_ctx;
+ uint32_t threshold;
+
+ if ((enum rte_cryptodev_schedule_option_type)option_type !=
+ CDEV_SCHED_OPTION_THRESHOLD) {
+ CS_LOG_ERR("Option not supported");
+ return -EINVAL;
+ }
+
+ threshold = ((struct rte_cryptodev_scheduler_threshold_option *)
+ option)->threshold;
+ if (!rte_is_power_of_2(threshold)) {
+ CS_LOG_ERR("Threshold is not power of 2");
+ return -EINVAL;
+ }
+
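+	/*
+	 * The threshold is stored as a mask, ~(threshold - 1), so the
+	 * enqueue fast path can classify a packet with a single bitwise
+	 * AND against its length instead of a comparison.
+	 */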
+ psd_ctx->threshold = ~(threshold - 1);
+
+ return 0;
+}
+
+static int
+scheduler_option_get(struct rte_cryptodev *dev, uint32_t option_type,
+ void *option)
+{
+ struct psd_scheduler_ctx *psd_ctx = ((struct scheduler_ctx *)
+ dev->data->dev_private)->private_ctx;
+ struct rte_cryptodev_scheduler_threshold_option *threshold_option;
+
+ if ((enum rte_cryptodev_schedule_option_type)option_type !=
+ CDEV_SCHED_OPTION_THRESHOLD) {
+ CS_LOG_ERR("Option not supported");
+ return -EINVAL;
+ }
+
+ threshold_option = option;
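+	/* convert the stored mask back into the original power-of-two threshold */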
+ threshold_option->threshold = (~psd_ctx->threshold) + 1;
+
+ return 0;
+}
+
+struct rte_cryptodev_scheduler_ops scheduler_ps_ops = {
+ slave_attach,
+ slave_detach,
+ scheduler_start,
+ scheduler_stop,
+ scheduler_config_qp,
+ scheduler_create_private_ctx,
+ scheduler_option_set,
+ scheduler_option_get
+};
+
+struct rte_cryptodev_scheduler psd_scheduler = {
+ .name = "packet-size-based-scheduler",
+ .description = "scheduler which will distribute crypto op "
+ "burst based on the packet size",
+ .mode = CDEV_SCHED_MODE_PKT_SIZE_DISTR,
+ .ops = &scheduler_ps_ops
+};
+
+struct rte_cryptodev_scheduler *pkt_size_based_distr_scheduler = &psd_scheduler;
diff --git a/src/seastar/dpdk/drivers/crypto/scheduler/scheduler_pmd.c b/src/seastar/dpdk/drivers/crypto/scheduler/scheduler_pmd.c
new file mode 100644
index 00000000..0b63c20b
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/scheduler/scheduler_pmd.c
@@ -0,0 +1,456 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_vdev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+#include <rte_reorder.h>
+
+#include "rte_cryptodev_scheduler.h"
+#include "scheduler_pmd_private.h"
+
+struct scheduler_init_params {
+ struct rte_crypto_vdev_init_params def_p;
+ uint32_t nb_slaves;
+ enum rte_cryptodev_scheduler_mode mode;
+ uint32_t enable_ordering;
+ char slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES]
+ [RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
+};
+
+#define RTE_CRYPTODEV_VDEV_NAME ("name")
+#define RTE_CRYPTODEV_VDEV_SLAVE ("slave")
+#define RTE_CRYPTODEV_VDEV_MODE ("mode")
+#define RTE_CRYPTODEV_VDEV_ORDERING ("ordering")
+#define RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG ("max_nb_queue_pairs")
+#define RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG ("max_nb_sessions")
+#define RTE_CRYPTODEV_VDEV_SOCKET_ID ("socket_id")
+
+const char *scheduler_valid_params[] = {
+ RTE_CRYPTODEV_VDEV_NAME,
+ RTE_CRYPTODEV_VDEV_SLAVE,
+ RTE_CRYPTODEV_VDEV_MODE,
+ RTE_CRYPTODEV_VDEV_ORDERING,
+ RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG,
+ RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG,
+ RTE_CRYPTODEV_VDEV_SOCKET_ID
+};
+
+struct scheduler_parse_map {
+ const char *name;
+ uint32_t val;
+};
+
+const struct scheduler_parse_map scheduler_mode_map[] = {
+ {RTE_STR(SCHEDULER_MODE_NAME_ROUND_ROBIN),
+ CDEV_SCHED_MODE_ROUNDROBIN},
+ {RTE_STR(SCHEDULER_MODE_NAME_PKT_SIZE_DISTR),
+ CDEV_SCHED_MODE_PKT_SIZE_DISTR},
+ {RTE_STR(SCHEDULER_MODE_NAME_FAIL_OVER),
+ CDEV_SCHED_MODE_FAILOVER}
+};
+
+const struct scheduler_parse_map scheduler_ordering_map[] = {
+ {"enable", 1},
+ {"disable", 0}
+};
+
+static int
+cryptodev_scheduler_create(const char *name,
+ struct scheduler_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ struct scheduler_ctx *sched_ctx;
+ uint32_t i;
+ int ret;
+
+ if (init_params->def_p.name[0] == '\0')
+ snprintf(init_params->def_p.name,
+ sizeof(init_params->def_p.name),
+ "%s", name);
+
+ dev = rte_cryptodev_pmd_virtual_dev_init(init_params->def_p.name,
+ sizeof(struct scheduler_ctx),
+ init_params->def_p.socket_id);
+ if (dev == NULL) {
+ CS_LOG_ERR("driver %s: failed to create cryptodev vdev",
+ name);
+ return -EFAULT;
+ }
+
+ dev->dev_type = RTE_CRYPTODEV_SCHEDULER_PMD;
+ dev->dev_ops = rte_crypto_scheduler_pmd_ops;
+
+ sched_ctx = dev->data->dev_private;
+ sched_ctx->max_nb_queue_pairs =
+ init_params->def_p.max_nb_queue_pairs;
+
+ if (init_params->mode > CDEV_SCHED_MODE_USERDEFINED &&
+ init_params->mode < CDEV_SCHED_MODE_COUNT) {
+ ret = rte_cryptodev_scheduler_mode_set(dev->data->dev_id,
+ init_params->mode);
+ if (ret < 0) {
+ rte_cryptodev_pmd_release_device(dev);
+ return ret;
+ }
+
+ for (i = 0; i < RTE_DIM(scheduler_mode_map); i++) {
+ if (scheduler_mode_map[i].val != sched_ctx->mode)
+ continue;
+
+ RTE_LOG(INFO, PMD, " Scheduling mode = %s\n",
+ scheduler_mode_map[i].name);
+ break;
+ }
+ }
+
+ sched_ctx->reordering_enabled = init_params->enable_ordering;
+
+ for (i = 0; i < RTE_DIM(scheduler_ordering_map); i++) {
+ if (scheduler_ordering_map[i].val !=
+ sched_ctx->reordering_enabled)
+ continue;
+
+ RTE_LOG(INFO, PMD, " Packet ordering = %s\n",
+ scheduler_ordering_map[i].name);
+
+ break;
+ }
+
+ for (i = 0; i < init_params->nb_slaves; i++) {
+ sched_ctx->init_slave_names[sched_ctx->nb_init_slaves] =
+ rte_zmalloc_socket(
+ NULL,
+ RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN, 0,
+ SOCKET_ID_ANY);
+
+ if (!sched_ctx->init_slave_names[
+ sched_ctx->nb_init_slaves]) {
+ CS_LOG_ERR("driver %s: Insufficient memory",
+ name);
+ return -ENOMEM;
+ }
+
+ strncpy(sched_ctx->init_slave_names[
+ sched_ctx->nb_init_slaves],
+ init_params->slave_names[i],
+ RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN - 1);
+
+ sched_ctx->nb_init_slaves++;
+ }
+
+ /*
+ * Initialize capabilities structure as an empty structure,
+ * in case device information is requested when no slaves are attached
+ */
+ sched_ctx->capabilities = rte_zmalloc_socket(NULL,
+ sizeof(struct rte_cryptodev_capabilities),
+ 0, SOCKET_ID_ANY);
+
+ if (!sched_ctx->capabilities) {
+ RTE_LOG(ERR, PMD, "Not enough memory for capability "
+ "information\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int
+cryptodev_scheduler_remove(struct rte_vdev_device *vdev)
+{
+ const char *name;
+ struct rte_cryptodev *dev;
+ struct scheduler_ctx *sched_ctx;
+
+ if (vdev == NULL)
+ return -EINVAL;
+
+ name = rte_vdev_device_name(vdev);
+ dev = rte_cryptodev_pmd_get_named_dev(name);
+ if (dev == NULL)
+ return -EINVAL;
+
+ sched_ctx = dev->data->dev_private;
+
+ if (sched_ctx->nb_slaves) {
+ uint32_t i;
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++)
+ rte_cryptodev_scheduler_slave_detach(dev->data->dev_id,
+ sched_ctx->slaves[i].dev_id);
+ }
+
+ RTE_LOG(INFO, PMD, "Closing Crypto Scheduler device %s on numa "
+ "socket %u\n", name, rte_socket_id());
+
+ return 0;
+}
+
+static uint8_t
+number_of_sockets(void)
+{
+ int sockets = 0;
+ int i;
+ const struct rte_memseg *ms = rte_eal_get_physmem_layout();
+
+ for (i = 0; ((i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL)); i++) {
+ if (sockets < ms[i].socket_id)
+ sockets = ms[i].socket_id;
+ }
+
+ /* Number of sockets = maximum socket_id + 1 */
+ return ++sockets;
+}
+
+/** Parse integer from integer argument */
+static int
+parse_integer_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ int *i = (int *) extra_args;
+
+ *i = atoi(value);
+ if (*i < 0) {
+		CS_LOG_ERR("Argument has to be non-negative.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/** Parse name */
+static int
+parse_name_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct rte_crypto_vdev_init_params *params = extra_args;
+
+ if (strlen(value) >= RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
+ CS_LOG_ERR("Invalid name %s, should be less than "
+ "%u bytes.\n", value,
+ RTE_CRYPTODEV_NAME_MAX_LEN - 1);
+ return -EINVAL;
+ }
+
+ strncpy(params->name, value, RTE_CRYPTODEV_NAME_MAX_LEN);
+
+ return 0;
+}
+
+/** Parse slave */
+static int
+parse_slave_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct scheduler_init_params *param = extra_args;
+
+ if (param->nb_slaves >= RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES - 1) {
+ CS_LOG_ERR("Too many slaves.\n");
+ return -ENOMEM;
+ }
+
+ strncpy(param->slave_names[param->nb_slaves++], value,
+ RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN - 1);
+
+ return 0;
+}
+
+static int
+parse_mode_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct scheduler_init_params *param = extra_args;
+ uint32_t i;
+
+ for (i = 0; i < RTE_DIM(scheduler_mode_map); i++) {
+ if (strcmp(value, scheduler_mode_map[i].name) == 0) {
+ param->mode = (enum rte_cryptodev_scheduler_mode)
+ scheduler_mode_map[i].val;
+ break;
+ }
+ }
+
+ if (i == RTE_DIM(scheduler_mode_map)) {
+ CS_LOG_ERR("Unrecognized input.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+parse_ordering_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct scheduler_init_params *param = extra_args;
+ uint32_t i;
+
+ for (i = 0; i < RTE_DIM(scheduler_ordering_map); i++) {
+ if (strcmp(value, scheduler_ordering_map[i].name) == 0) {
+ param->enable_ordering =
+ scheduler_ordering_map[i].val;
+ break;
+ }
+ }
+
+ if (i == RTE_DIM(scheduler_ordering_map)) {
+ CS_LOG_ERR("Unrecognized input.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+scheduler_parse_init_params(struct scheduler_init_params *params,
+ const char *input_args)
+{
+ struct rte_kvargs *kvlist = NULL;
+ int ret = 0;
+
+ if (params == NULL)
+ return -EINVAL;
+
+ if (input_args) {
+ kvlist = rte_kvargs_parse(input_args,
+ scheduler_valid_params);
+ if (kvlist == NULL)
+ return -1;
+
+ ret = rte_kvargs_process(kvlist,
+ RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG,
+ &parse_integer_arg,
+ &params->def_p.max_nb_queue_pairs);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist,
+ RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG,
+ &parse_integer_arg,
+ &params->def_p.max_nb_sessions);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_SOCKET_ID,
+ &parse_integer_arg,
+ &params->def_p.socket_id);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_NAME,
+ &parse_name_arg,
+ &params->def_p);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_SLAVE,
+ &parse_slave_arg, params);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_MODE,
+ &parse_mode_arg, params);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_ORDERING,
+ &parse_ordering_arg, params);
+ if (ret < 0)
+ goto free_kvlist;
+
+		if (params->def_p.socket_id >= number_of_sockets()) {
+			CDEV_LOG_ERR("Invalid socket id specified to create "
+				"the virtual crypto device on");
+			ret = -EINVAL;
+			goto free_kvlist;
+		}
+ }
+
+free_kvlist:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+
+static int
+cryptodev_scheduler_probe(struct rte_vdev_device *vdev)
+{
+ struct scheduler_init_params init_params = {
+ .def_p = {
+ RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
+ RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+ rte_socket_id(),
+ ""
+ },
+ .nb_slaves = 0,
+ .mode = CDEV_SCHED_MODE_NOT_SET,
+ .enable_ordering = 0,
+ .slave_names = { {0} }
+ };
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ scheduler_parse_init_params(&init_params,
+ rte_vdev_device_args(vdev));
+
+ RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n",
+ name,
+ init_params.def_p.socket_id);
+ RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
+ init_params.def_p.max_nb_queue_pairs);
+ RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
+ init_params.def_p.max_nb_sessions);
+ if (init_params.def_p.name[0] != '\0')
+ RTE_LOG(INFO, PMD, " User defined name = %s\n",
+ init_params.def_p.name);
+
+ return cryptodev_scheduler_create(name,
+ &init_params);
+}
+
+static struct rte_vdev_driver cryptodev_scheduler_pmd_drv = {
+ .probe = cryptodev_scheduler_probe,
+ .remove = cryptodev_scheduler_remove
+};
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_SCHEDULER_PMD,
+ cryptodev_scheduler_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SCHEDULER_PMD,
+ "max_nb_queue_pairs=<int> "
+ "max_nb_sessions=<int> "
+ "socket_id=<int> "
+ "slave=<name>");
diff --git a/src/seastar/dpdk/drivers/crypto/scheduler/scheduler_pmd_ops.c b/src/seastar/dpdk/drivers/crypto/scheduler/scheduler_pmd_ops.c
new file mode 100644
index 00000000..2b5858df
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/scheduler/scheduler_pmd_ops.c
@@ -0,0 +1,571 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <string.h>
+
+#include <rte_config.h>
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_dev.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_reorder.h>
+
+#include "scheduler_pmd_private.h"
+
+/** attach the slaves predefined by the scheduler's EAL options */
+static int
+scheduler_attach_init_slave(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint8_t scheduler_id = dev->data->dev_id;
+ int i;
+
+ for (i = sched_ctx->nb_init_slaves - 1; i >= 0; i--) {
+ const char *dev_name = sched_ctx->init_slave_names[i];
+ struct rte_cryptodev *slave_dev =
+ rte_cryptodev_pmd_get_named_dev(dev_name);
+ int status;
+
+ if (!slave_dev) {
+ CS_LOG_ERR("Failed to locate slave dev %s",
+ dev_name);
+ return -EINVAL;
+ }
+
+ status = rte_cryptodev_scheduler_slave_attach(
+ scheduler_id, slave_dev->data->dev_id);
+
+ if (status < 0) {
+ CS_LOG_ERR("Failed to attach slave cryptodev %u",
+ slave_dev->data->dev_id);
+ return status;
+ }
+
+ CS_LOG_INFO("Scheduler %s attached slave %s\n",
+ dev->data->name,
+ sched_ctx->init_slave_names[i]);
+
+ rte_free(sched_ctx->init_slave_names[i]);
+
+ sched_ctx->nb_init_slaves -= 1;
+ }
+
+ return 0;
+}
+
+/** Configure device */
+static int
+scheduler_pmd_config(struct rte_cryptodev *dev,
+ struct rte_cryptodev_config *config)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint32_t i;
+ int ret;
+
+	/* scheduler_attach_init_slave may be called from several entry points,
+	 * but only one invocation does meaningful work.
+	 */
+ ret = scheduler_attach_init_slave(dev);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+
+ ret = rte_cryptodev_configure(slave_dev_id, config);
+ if (ret < 0)
+ break;
+ }
+
+ return ret;
+}
+
+static int
+update_order_ring(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
+
+ if (sched_ctx->reordering_enabled) {
+ char order_ring_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ uint32_t buff_size = rte_align32pow2(
+ sched_ctx->nb_slaves * PER_SLAVE_BUFF_SIZE);
+
+ if (qp_ctx->order_ring) {
+ rte_ring_free(qp_ctx->order_ring);
+ qp_ctx->order_ring = NULL;
+ }
+
+ if (!buff_size)
+ return 0;
+
+ if (snprintf(order_ring_name, RTE_CRYPTODEV_NAME_MAX_LEN,
+ "%s_rb_%u_%u", RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),
+ dev->data->dev_id, qp_id) < 0) {
+ CS_LOG_ERR("failed to create unique reorder buffer "
+ "name");
+ return -ENOMEM;
+ }
+
+ qp_ctx->order_ring = rte_ring_create(order_ring_name,
+ buff_size, rte_socket_id(),
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+ if (!qp_ctx->order_ring) {
+ CS_LOG_ERR("failed to create order ring");
+ return -ENOMEM;
+ }
+ } else {
+ if (qp_ctx->order_ring) {
+ rte_ring_free(qp_ctx->order_ring);
+ qp_ctx->order_ring = NULL;
+ }
+ }
+
+ return 0;
+}
+
+/** Start device */
+static int
+scheduler_pmd_start(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint32_t i;
+ int ret;
+
+ if (dev->data->dev_started)
+ return 0;
+
+	/* scheduler_attach_init_slave may be called from several entry points,
+	 * but only one invocation does meaningful work.
+	 */
+ ret = scheduler_attach_init_slave(dev);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ ret = update_order_ring(dev, i);
+ if (ret < 0) {
+ CS_LOG_ERR("Failed to update reorder buffer");
+ return ret;
+ }
+ }
+
+ if (sched_ctx->mode == CDEV_SCHED_MODE_NOT_SET) {
+ CS_LOG_ERR("Scheduler mode is not set");
+ return -1;
+ }
+
+ if (!sched_ctx->nb_slaves) {
+ CS_LOG_ERR("No slave in the scheduler");
+ return -1;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.slave_attach, -ENOTSUP);
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+
+ if ((*sched_ctx->ops.slave_attach)(dev, slave_dev_id) < 0) {
+ CS_LOG_ERR("Failed to attach slave");
+ return -ENOTSUP;
+ }
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.scheduler_start, -ENOTSUP);
+
+ if ((*sched_ctx->ops.scheduler_start)(dev) < 0) {
+ CS_LOG_ERR("Scheduler start failed");
+ return -1;
+ }
+
+ /* start all slaves */
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+ struct rte_cryptodev *slave_dev =
+ rte_cryptodev_pmd_get_dev(slave_dev_id);
+
+ ret = (*slave_dev->dev_ops->dev_start)(slave_dev);
+ if (ret < 0) {
+ CS_LOG_ERR("Failed to start slave dev %u",
+ slave_dev_id);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/** Stop device */
+static void
+scheduler_pmd_stop(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint32_t i;
+
+ if (!dev->data->dev_started)
+ return;
+
+ /* stop all slaves first */
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+ struct rte_cryptodev *slave_dev =
+ rte_cryptodev_pmd_get_dev(slave_dev_id);
+
+ (*slave_dev->dev_ops->dev_stop)(slave_dev);
+ }
+
+ if (*sched_ctx->ops.scheduler_stop)
+ (*sched_ctx->ops.scheduler_stop)(dev);
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+
+ if (*sched_ctx->ops.slave_detach)
+ (*sched_ctx->ops.slave_detach)(dev, slave_dev_id);
+ }
+}
+
+/** Close device */
+static int
+scheduler_pmd_close(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint32_t i;
+ int ret;
+
+ /* the dev should be stopped before being closed */
+ if (dev->data->dev_started)
+ return -EBUSY;
+
+ /* close all slaves first */
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+ struct rte_cryptodev *slave_dev =
+ rte_cryptodev_pmd_get_dev(slave_dev_id);
+
+ ret = (*slave_dev->dev_ops->dev_close)(slave_dev);
+ if (ret < 0)
+ return ret;
+ }
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
+
+ if (qp_ctx->order_ring) {
+ rte_ring_free(qp_ctx->order_ring);
+ qp_ctx->order_ring = NULL;
+ }
+
+ if (qp_ctx->private_qp_ctx) {
+ rte_free(qp_ctx->private_qp_ctx);
+ qp_ctx->private_qp_ctx = NULL;
+ }
+ }
+
+ if (sched_ctx->private_ctx)
+ rte_free(sched_ctx->private_ctx);
+
+ if (sched_ctx->capabilities)
+ rte_free(sched_ctx->capabilities);
+
+ return 0;
+}
+
+/** Get device statistics */
+static void
+scheduler_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint32_t i;
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+ struct rte_cryptodev *slave_dev =
+ rte_cryptodev_pmd_get_dev(slave_dev_id);
+ struct rte_cryptodev_stats slave_stats = {0};
+
+ (*slave_dev->dev_ops->stats_get)(slave_dev, &slave_stats);
+
+ stats->enqueued_count += slave_stats.enqueued_count;
+ stats->dequeued_count += slave_stats.dequeued_count;
+
+ stats->enqueue_err_count += slave_stats.enqueue_err_count;
+ stats->dequeue_err_count += slave_stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+scheduler_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint32_t i;
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+ struct rte_cryptodev *slave_dev =
+ rte_cryptodev_pmd_get_dev(slave_dev_id);
+
+ (*slave_dev->dev_ops->stats_reset)(slave_dev);
+ }
+}
+
+/** Get device info */
+static void
+scheduler_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint32_t max_nb_sessions = sched_ctx->nb_slaves ?
+ UINT32_MAX : RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS;
+ uint32_t i;
+
+ if (!dev_info)
+ return;
+
+	/* scheduler_attach_init_slave may be called from several entry points,
+	 * but only one invocation does meaningful work.
+	 */
+ scheduler_attach_init_slave(dev);
+
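+	/*
+	 * Report the most restrictive session limit across all slaves; the
+	 * UINT32_MAX starting value lets the first attached slave set the
+	 * bound.
+	 */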
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+ struct rte_cryptodev_info slave_info;
+
+ rte_cryptodev_info_get(slave_dev_id, &slave_info);
+ max_nb_sessions = slave_info.sym.max_nb_sessions <
+ max_nb_sessions ?
+ slave_info.sym.max_nb_sessions :
+ max_nb_sessions;
+ }
+
+ dev_info->dev_type = dev->dev_type;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = sched_ctx->capabilities;
+ dev_info->max_nb_queue_pairs = sched_ctx->max_nb_queue_pairs;
+ dev_info->sym.max_nb_sessions = max_nb_sessions;
+}
+
+/** Release queue pair */
+static int
+scheduler_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
+
+ if (!qp_ctx)
+ return 0;
+
+ if (qp_ctx->order_ring)
+ rte_ring_free(qp_ctx->order_ring);
+ if (qp_ctx->private_qp_ctx)
+ rte_free(qp_ctx->private_qp_ctx);
+
+ rte_free(qp_ctx);
+ dev->data->queue_pairs[qp_id] = NULL;
+
+ return 0;
+}
+
+/** Setup a queue pair */
+static int
+scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ struct scheduler_qp_ctx *qp_ctx;
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ uint32_t i;
+ int ret;
+
+ if (snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
+			"CRYPTO_SCHE PMD %u QP %u",
+ dev->data->dev_id, qp_id) < 0) {
+ CS_LOG_ERR("Failed to create unique queue pair name");
+ return -EFAULT;
+ }
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ scheduler_pmd_qp_release(dev, qp_id);
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ uint8_t slave_id = sched_ctx->slaves[i].dev_id;
+
+ ret = rte_cryptodev_queue_pair_setup(slave_id, qp_id,
+ qp_conf, socket_id);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Allocate the queue pair data structure. */
+ qp_ctx = rte_zmalloc_socket(name, sizeof(*qp_ctx), RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (qp_ctx == NULL)
+ return -ENOMEM;
+
+ /* The actual available object number = nb_descriptors - 1 */
+ qp_ctx->max_nb_objs = qp_conf->nb_descriptors - 1;
+
+ dev->data->queue_pairs[qp_id] = qp_ctx;
+
+	/* scheduler_attach_init_slave may be called from several entry points,
+	 * but only one invocation does meaningful work.
+	 */
+ ret = scheduler_attach_init_slave(dev);
+ if (ret < 0) {
+ CS_LOG_ERR("Failed to attach slave");
+ scheduler_pmd_qp_release(dev, qp_id);
+ return ret;
+ }
+
+ if (*sched_ctx->ops.config_queue_pair) {
+ if ((*sched_ctx->ops.config_queue_pair)(dev, qp_id) < 0) {
+ CS_LOG_ERR("Unable to configure queue pair");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/** Start queue pair */
+static int
+scheduler_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ return -ENOTSUP;
+}
+
+/** Stop queue pair */
+static int
+scheduler_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ return -ENOTSUP;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+scheduler_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+static uint32_t
+scheduler_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct scheduler_session);
+}
+
+static int
+config_slave_sess(struct scheduler_ctx *sched_ctx,
+ struct rte_crypto_sym_xform *xform,
+ struct scheduler_session *sess,
+ uint32_t create)
+{
+ uint32_t i;
+
+ for (i = 0; i < sched_ctx->nb_slaves; i++) {
+ struct scheduler_slave *slave = &sched_ctx->slaves[i];
+
+ if (sess->sessions[i]) {
+ if (create)
+ continue;
+ /* !create */
+ sess->sessions[i] = rte_cryptodev_sym_session_free(
+ slave->dev_id, sess->sessions[i]);
+ } else {
+ if (!create)
+ continue;
+ /* create */
+ sess->sessions[i] =
+ rte_cryptodev_sym_session_create(
+ slave->dev_id, xform);
+ if (!sess->sessions[i]) {
+ config_slave_sess(sched_ctx, NULL, sess, 0);
+ return -1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+scheduler_pmd_session_clear(struct rte_cryptodev *dev,
+ void *sess)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+
+ config_slave_sess(sched_ctx, NULL, sess, 0);
+
+ memset(sess, 0, sizeof(struct scheduler_session));
+}
+
+static void *
+scheduler_pmd_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform, void *sess)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+
+ if (config_slave_sess(sched_ctx, xform, sess, 1) < 0) {
+		CS_LOG_ERR("unable to configure sym session");
+ return NULL;
+ }
+
+ return sess;
+}
+
+struct rte_cryptodev_ops scheduler_pmd_ops = {
+ .dev_configure = scheduler_pmd_config,
+ .dev_start = scheduler_pmd_start,
+ .dev_stop = scheduler_pmd_stop,
+ .dev_close = scheduler_pmd_close,
+
+ .stats_get = scheduler_pmd_stats_get,
+ .stats_reset = scheduler_pmd_stats_reset,
+
+ .dev_infos_get = scheduler_pmd_info_get,
+
+ .queue_pair_setup = scheduler_pmd_qp_setup,
+ .queue_pair_release = scheduler_pmd_qp_release,
+ .queue_pair_start = scheduler_pmd_qp_start,
+ .queue_pair_stop = scheduler_pmd_qp_stop,
+ .queue_pair_count = scheduler_pmd_qp_count,
+
+ .session_get_size = scheduler_pmd_session_get_size,
+ .session_configure = scheduler_pmd_session_configure,
+ .session_clear = scheduler_pmd_session_clear,
+};
+
+struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops = &scheduler_pmd_ops;
diff --git a/src/seastar/dpdk/drivers/crypto/scheduler/scheduler_pmd_private.h b/src/seastar/dpdk/drivers/crypto/scheduler/scheduler_pmd_private.h
new file mode 100644
index 00000000..421dae37
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/scheduler/scheduler_pmd_private.h
@@ -0,0 +1,156 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCHEDULER_PMD_PRIVATE_H
+#define _SCHEDULER_PMD_PRIVATE_H
+
+#include "rte_cryptodev_scheduler.h"
+
+#define PER_SLAVE_BUFF_SIZE (256)
+
+#define CS_LOG_ERR(fmt, args...) \
+ RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD), \
+ __func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_CRYPTO_SCHEDULER_DEBUG
+#define CS_LOG_INFO(fmt, args...) \
+ RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD), \
+ __func__, __LINE__, ## args)
+
+#define CS_LOG_DBG(fmt, args...) \
+ RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD), \
+ __func__, __LINE__, ## args)
+#else
+#define CS_LOG_INFO(fmt, args...)
+#define CS_LOG_DBG(fmt, args...)
+#endif
+
+struct scheduler_slave {
+ uint8_t dev_id;
+ uint16_t qp_id;
+ uint32_t nb_inflight_cops;
+
+ enum rte_cryptodev_type dev_type;
+};
+
+struct scheduler_ctx {
+ void *private_ctx;
+ /**< private scheduler context pointer */
+
+ struct rte_cryptodev_capabilities *capabilities;
+ uint32_t nb_capabilities;
+
+ uint32_t max_nb_queue_pairs;
+
+ struct scheduler_slave slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
+ uint32_t nb_slaves;
+
+ enum rte_cryptodev_scheduler_mode mode;
+
+ struct rte_cryptodev_scheduler_ops ops;
+
+ uint8_t reordering_enabled;
+
+ char name[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
+ char description[RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN];
+
+ char *init_slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
+ int nb_init_slaves;
+} __rte_cache_aligned;
+
+struct scheduler_qp_ctx {
+ void *private_qp_ctx;
+
+ uint32_t max_nb_objs;
+
+ struct rte_ring *order_ring;
+ uint32_t seqn;
+} __rte_cache_aligned;
+
+struct scheduler_session {
+ struct rte_cryptodev_sym_session *sessions[
+ RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
+};
+
+static inline uint16_t __attribute__((always_inline))
+get_max_enqueue_order_count(struct rte_ring *order_ring, uint16_t nb_ops)
+{
+ uint32_t count = rte_ring_free_count(order_ring);
+
+ return count > nb_ops ? nb_ops : count;
+}
+
+static inline void __attribute__((always_inline))
+scheduler_order_insert(struct rte_ring *order_ring,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ rte_ring_sp_enqueue_burst(order_ring, (void **)ops, nb_ops, NULL);
+}
+
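+/*
+ * Peek at the object at position @pos relative to the ring's consumer head
+ * without dequeuing it. This relies on the rte_ring layout where the object
+ * table immediately follows the ring header.
+ */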
+#define SCHEDULER_GET_RING_OBJ(order_ring, pos, op) do { \
+ struct rte_crypto_op **ring = (void *)&order_ring[1]; \
+ op = ring[(order_ring->cons.head + pos) & order_ring->mask]; \
+} while (0)
+
+static inline uint16_t __attribute__((always_inline))
+scheduler_order_drain(struct rte_ring *order_ring,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct rte_crypto_op *op;
+ uint32_t nb_objs = rte_ring_count(order_ring);
+ uint32_t nb_ops_to_deq = 0;
+ uint32_t nb_ops_deqd = 0;
+
+ if (nb_objs > nb_ops)
+ nb_objs = nb_ops;
+
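+	/*
+	 * Count how many ops at the head of the ring have already been
+	 * processed, stopping at the first unprocessed one so that the
+	 * original enqueue order is preserved.
+	 */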
+ while (nb_ops_to_deq < nb_objs) {
+ SCHEDULER_GET_RING_OBJ(order_ring, nb_ops_to_deq, op);
+ if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+ break;
+ nb_ops_to_deq++;
+ }
+
+ if (nb_ops_to_deq)
+ nb_ops_deqd = rte_ring_sc_dequeue_bulk(order_ring,
+ (void **)ops, nb_ops_to_deq, NULL);
+
+ return nb_ops_deqd;
+}
+
+/** device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops;
+
+#endif /* _SCHEDULER_PMD_PRIVATE_H */
diff --git a/src/seastar/dpdk/drivers/crypto/scheduler/scheduler_roundrobin.c b/src/seastar/dpdk/drivers/crypto/scheduler/scheduler_roundrobin.c
new file mode 100644
index 00000000..01162764
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/scheduler/scheduler_roundrobin.c
@@ -0,0 +1,281 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_cryptodev.h>
+#include <rte_malloc.h>
+
+#include "rte_cryptodev_scheduler_operations.h"
+#include "scheduler_pmd_private.h"
+
+struct rr_scheduler_qp_ctx {
+ struct scheduler_slave slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
+ uint32_t nb_slaves;
+
+ uint32_t last_enq_slave_idx;
+ uint32_t last_deq_slave_idx;
+};
+
+static uint16_t
+schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct rr_scheduler_qp_ctx *rr_qp_ctx =
+ ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
+ uint32_t slave_idx = rr_qp_ctx->last_enq_slave_idx;
+ struct scheduler_slave *slave = &rr_qp_ctx->slaves[slave_idx];
+ uint16_t i, processed_ops;
+ struct rte_cryptodev_sym_session *sessions[nb_ops];
+ struct scheduler_session *sess0, *sess1, *sess2, *sess3;
+
+ if (unlikely(nb_ops == 0))
+ return 0;
+
+ for (i = 0; i < nb_ops && i < 4; i++)
+ rte_prefetch0(ops[i]->sym->session);
+
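+	/*
+	 * Software-pipelined main loop: replace the scheduler session with the
+	 * chosen slave's own session for four ops at a time while prefetching
+	 * the sessions of the next four ops.
+	 */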
+ for (i = 0; (i < (nb_ops - 8)) && (nb_ops > 8); i += 4) {
+ sess0 = (struct scheduler_session *)
+ ops[i]->sym->session->_private;
+ sess1 = (struct scheduler_session *)
+ ops[i+1]->sym->session->_private;
+ sess2 = (struct scheduler_session *)
+ ops[i+2]->sym->session->_private;
+ sess3 = (struct scheduler_session *)
+ ops[i+3]->sym->session->_private;
+
+ sessions[i] = ops[i]->sym->session;
+ sessions[i + 1] = ops[i + 1]->sym->session;
+ sessions[i + 2] = ops[i + 2]->sym->session;
+ sessions[i + 3] = ops[i + 3]->sym->session;
+
+ ops[i]->sym->session = sess0->sessions[slave_idx];
+ ops[i + 1]->sym->session = sess1->sessions[slave_idx];
+ ops[i + 2]->sym->session = sess2->sessions[slave_idx];
+ ops[i + 3]->sym->session = sess3->sessions[slave_idx];
+
+ rte_prefetch0(ops[i + 4]->sym->session);
+ rte_prefetch0(ops[i + 5]->sym->session);
+ rte_prefetch0(ops[i + 6]->sym->session);
+ rte_prefetch0(ops[i + 7]->sym->session);
+ }
+
+ for (; i < nb_ops; i++) {
+ sess0 = (struct scheduler_session *)
+ ops[i]->sym->session->_private;
+ sessions[i] = ops[i]->sym->session;
+ ops[i]->sym->session = sess0->sessions[slave_idx];
+ }
+
+ processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
+ slave->qp_id, ops, nb_ops);
+
+ slave->nb_inflight_cops += processed_ops;
+
+ rr_qp_ctx->last_enq_slave_idx += 1;
+ rr_qp_ctx->last_enq_slave_idx %= rr_qp_ctx->nb_slaves;
+
+ /* recover session if enqueue is failed */
+ if (unlikely(processed_ops < nb_ops)) {
+ for (i = processed_ops; i < nb_ops; i++)
+ ops[i]->sym->session = sessions[i];
+ }
+
+ return processed_ops;
+}
+
+static uint16_t
+schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_ring *order_ring =
+ ((struct scheduler_qp_ctx *)qp)->order_ring;
+ uint16_t nb_ops_to_enq = get_max_enqueue_order_count(order_ring,
+ nb_ops);
+ uint16_t nb_ops_enqd = schedule_enqueue(qp, ops,
+ nb_ops_to_enq);
+
+ scheduler_order_insert(order_ring, ops, nb_ops_enqd);
+
+ return nb_ops_enqd;
+}
+
+
+static uint16_t
+schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct rr_scheduler_qp_ctx *rr_qp_ctx =
+ ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
+ struct scheduler_slave *slave;
+ uint32_t last_slave_idx = rr_qp_ctx->last_deq_slave_idx;
+ uint16_t nb_deq_ops;
+
+ if (unlikely(rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops == 0)) {
+ do {
+ last_slave_idx += 1;
+
+ if (unlikely(last_slave_idx >= rr_qp_ctx->nb_slaves))
+ last_slave_idx = 0;
+ /* looped back, means no inflight cops in the queue */
+ if (last_slave_idx == rr_qp_ctx->last_deq_slave_idx)
+ return 0;
+ } while (rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops
+ == 0);
+ }
+
+ slave = &rr_qp_ctx->slaves[last_slave_idx];
+
+ nb_deq_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
+ slave->qp_id, ops, nb_ops);
+
+ last_slave_idx += 1;
+ last_slave_idx %= rr_qp_ctx->nb_slaves;
+
+ rr_qp_ctx->last_deq_slave_idx = last_slave_idx;
+
+ slave->nb_inflight_cops -= nb_deq_ops;
+
+ return nb_deq_ops;
+}
+
+static uint16_t
+schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_ring *order_ring =
+ ((struct scheduler_qp_ctx *)qp)->order_ring;
+
+ schedule_dequeue(qp, ops, nb_ops);
+
+ return scheduler_order_drain(order_ring, ops, nb_ops);
+}
+
+static int
+slave_attach(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint8_t slave_id)
+{
+ return 0;
+}
+
+static int
+slave_detach(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint8_t slave_id)
+{
+ return 0;
+}
+
+static int
+scheduler_start(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint16_t i;
+
+ if (sched_ctx->reordering_enabled) {
+ dev->enqueue_burst = &schedule_enqueue_ordering;
+ dev->dequeue_burst = &schedule_dequeue_ordering;
+ } else {
+ dev->enqueue_burst = &schedule_enqueue;
+ dev->dequeue_burst = &schedule_dequeue;
+ }
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
+ struct rr_scheduler_qp_ctx *rr_qp_ctx =
+ qp_ctx->private_qp_ctx;
+ uint32_t j;
+
+ memset(rr_qp_ctx->slaves, 0,
+ RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES *
+ sizeof(struct scheduler_slave));
+ for (j = 0; j < sched_ctx->nb_slaves; j++) {
+ rr_qp_ctx->slaves[j].dev_id =
+ sched_ctx->slaves[j].dev_id;
+ rr_qp_ctx->slaves[j].qp_id = i;
+ }
+
+ rr_qp_ctx->nb_slaves = sched_ctx->nb_slaves;
+
+ rr_qp_ctx->last_enq_slave_idx = 0;
+ rr_qp_ctx->last_deq_slave_idx = 0;
+ }
+
+ return 0;
+}
+
+static int
+scheduler_stop(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+static int
+scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
+ struct rr_scheduler_qp_ctx *rr_qp_ctx;
+
+ rr_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*rr_qp_ctx), 0,
+ rte_socket_id());
+ if (!rr_qp_ctx) {
+		CS_LOG_ERR("failed to allocate memory for private queue pair");
+ return -ENOMEM;
+ }
+
+ qp_ctx->private_qp_ctx = (void *)rr_qp_ctx;
+
+ return 0;
+}
+
+static int
+scheduler_create_private_ctx(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+struct rte_cryptodev_scheduler_ops scheduler_rr_ops = {
+ slave_attach,
+ slave_detach,
+ scheduler_start,
+ scheduler_stop,
+ scheduler_config_qp,
+ scheduler_create_private_ctx,
+ NULL, /* option_set */
+ NULL /* option_get */
+};
+
+struct rte_cryptodev_scheduler scheduler = {
+ .name = "roundrobin-scheduler",
+ .description = "scheduler which will round robin burst across "
+ "slave crypto devices",
+ .mode = CDEV_SCHED_MODE_ROUNDROBIN,
+ .ops = &scheduler_rr_ops
+};
+
+struct rte_cryptodev_scheduler *roundrobin_scheduler = &scheduler;
diff --git a/src/seastar/dpdk/drivers/crypto/snow3g/Makefile b/src/seastar/dpdk/drivers/crypto/snow3g/Makefile
new file mode 100644
index 00000000..ecee80df
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/snow3g/Makefile
@@ -0,0 +1,62 @@
+# BSD LICENSE
+#
+# Copyright(c) 2016 Intel Corporation. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifneq ($(MAKECMDGOALS),clean)
+ifeq ($(LIBSSO_SNOW3G_PATH),)
+$(error "Please define LIBSSO_SNOW3G_PATH environment variable")
+endif
+endif
+
+# library name
+LIB = librte_pmd_snow3g.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_snow3g_version.map
+
+# external library dependencies
+CFLAGS += -I$(LIBSSO_SNOW3G_PATH)
+CFLAGS += -I$(LIBSSO_SNOW3G_PATH)/include
+CFLAGS += -I$(LIBSSO_SNOW3G_PATH)/build
+LDLIBS += -L$(LIBSSO_SNOW3G_PATH)/build -lsso_snow3g
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += rte_snow3g_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += rte_snow3g_pmd_ops.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/seastar/dpdk/drivers/crypto/snow3g/rte_pmd_snow3g_version.map b/src/seastar/dpdk/drivers/crypto/snow3g/rte_pmd_snow3g_version.map
new file mode 100644
index 00000000..dc4d417b
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/snow3g/rte_pmd_snow3g_version.map
@@ -0,0 +1,3 @@
+DPDK_16.04 {
+ local: *;
+};
diff --git a/src/seastar/dpdk/drivers/crypto/snow3g/rte_snow3g_pmd.c b/src/seastar/dpdk/drivers/crypto/snow3g/rte_snow3g_pmd.c
new file mode 100644
index 00000000..960956ca
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/snow3g/rte_snow3g_pmd.c
@@ -0,0 +1,655 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_common.h>
+#include <rte_config.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_vdev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+
+#include "rte_snow3g_pmd_private.h"
+
+#define SNOW3G_IV_LENGTH 16
+#define SNOW3G_DIGEST_LENGTH 4
+#define SNOW3G_MAX_BURST 8
+#define BYTE_LEN 8
+
+/** Determine the SNOW 3G operation mode from the xform chain order. */
+static enum snow3g_operation
+snow3g_get_mode(const struct rte_crypto_sym_xform *xform)
+{
+ if (xform == NULL)
+ return SNOW3G_OP_NOT_SUPPORTED;
+
+	if (xform->next != NULL && xform->next->next != NULL)
+		return SNOW3G_OP_NOT_SUPPORTED;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (xform->next == NULL)
+ return SNOW3G_OP_ONLY_AUTH;
+ else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return SNOW3G_OP_AUTH_CIPHER;
+ else
+ return SNOW3G_OP_NOT_SUPPORTED;
+ }
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ if (xform->next == NULL)
+ return SNOW3G_OP_ONLY_CIPHER;
+ else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return SNOW3G_OP_CIPHER_AUTH;
+ else
+ return SNOW3G_OP_NOT_SUPPORTED;
+ }
+
+ return SNOW3G_OP_NOT_SUPPORTED;
+}
+
+
+/** Parse crypto xform chain and set private session parameters. */
+int
+snow3g_set_session_parameters(struct snow3g_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
+ enum snow3g_operation mode;
+
+ /* Select Crypto operation - hash then cipher / cipher then hash */
+ mode = snow3g_get_mode(xform);
+
+ switch (mode) {
+ case SNOW3G_OP_CIPHER_AUTH:
+ auth_xform = xform->next;
+
+ /* Fall-through */
+ case SNOW3G_OP_ONLY_CIPHER:
+ cipher_xform = xform;
+ break;
+ case SNOW3G_OP_AUTH_CIPHER:
+ cipher_xform = xform->next;
+ /* Fall-through */
+ case SNOW3G_OP_ONLY_AUTH:
+ auth_xform = xform;
+ break;
+ case SNOW3G_OP_NOT_SUPPORTED:
+ default:
+ SNOW3G_LOG_ERR("Unsupported operation chain order parameter");
+ return -EINVAL;
+ }
+
+ if (cipher_xform) {
+ /* Only SNOW 3G UEA2 supported */
+ if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_SNOW3G_UEA2)
+ return -EINVAL;
+ /* Initialize key */
+ sso_snow3g_init_key_sched(cipher_xform->cipher.key.data,
+ &sess->pKeySched_cipher);
+ }
+
+ if (auth_xform) {
+ /* Only SNOW 3G UIA2 supported */
+ if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_SNOW3G_UIA2)
+ return -EINVAL;
+ sess->auth_op = auth_xform->auth.op;
+ /* Initialize key */
+ sso_snow3g_init_key_sched(auth_xform->auth.key.data,
+ &sess->pKeySched_hash);
+ }
+
+ sess->op = mode;
+
+ return 0;
+}
+
+/** Get SNOW 3G session. */
+static struct snow3g_session *
+snow3g_get_session(struct snow3g_qp *qp, struct rte_crypto_op *op)
+{
+ struct snow3g_session *sess;
+
+ if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
+ if (unlikely(op->sym->session->dev_type !=
+ RTE_CRYPTODEV_SNOW3G_PMD))
+ return NULL;
+
+ sess = (struct snow3g_session *)op->sym->session->_private;
+ } else {
+ struct rte_cryptodev_session *c_sess = NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&c_sess))
+ return NULL;
+
+ sess = (struct snow3g_session *)c_sess->_private;
+
+ if (unlikely(snow3g_set_session_parameters(sess,
+ op->sym->xform) != 0))
+ return NULL;
+ }
+
+ return sess;
+}
+
+/** Encrypt/decrypt mbufs with same cipher key. */
+static uint8_t
+process_snow3g_cipher_op(struct rte_crypto_op **ops,
+ struct snow3g_session *session,
+ uint8_t num_ops)
+{
+ unsigned i;
+ uint8_t processed_ops = 0;
+ uint8_t *src[SNOW3G_MAX_BURST], *dst[SNOW3G_MAX_BURST];
+ uint8_t *IV[SNOW3G_MAX_BURST];
+ uint32_t num_bytes[SNOW3G_MAX_BURST];
+
+ for (i = 0; i < num_ops; i++) {
+ /* Sanity checks. */
+ if (unlikely(ops[i]->sym->cipher.iv.length != SNOW3G_IV_LENGTH)) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ SNOW3G_LOG_ERR("iv");
+ break;
+ }
+
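+		/* cipher offsets and lengths are given in bits; >> 3 converts to bytes */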
+ src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
+ (ops[i]->sym->cipher.data.offset >> 3);
+ dst[i] = ops[i]->sym->m_dst ?
+ rte_pktmbuf_mtod(ops[i]->sym->m_dst, uint8_t *) +
+ (ops[i]->sym->cipher.data.offset >> 3) :
+ rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
+ (ops[i]->sym->cipher.data.offset >> 3);
+ IV[i] = ops[i]->sym->cipher.iv.data;
+ num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
+
+ processed_ops++;
+ }
+
+ sso_snow3g_f8_n_buffer(&session->pKeySched_cipher, IV, src, dst,
+ num_bytes, processed_ops);
+
+ return processed_ops;
+}
+
+/** Encrypt/decrypt mbuf (bit level function). */
+static uint8_t
+process_snow3g_cipher_op_bit(struct rte_crypto_op *op,
+ struct snow3g_session *session)
+{
+ uint8_t *src, *dst;
+ uint8_t *IV;
+ uint32_t length_in_bits, offset_in_bits;
+
+ /* Sanity checks. */
+ if (unlikely(op->sym->cipher.iv.length != SNOW3G_IV_LENGTH)) {
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ SNOW3G_LOG_ERR("iv");
+ return 0;
+ }
+
+ offset_in_bits = op->sym->cipher.data.offset;
+ src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
+ if (op->sym->m_dst == NULL) {
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ SNOW3G_LOG_ERR("bit-level in-place not supported\n");
+ return 0;
+ }
+ dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
+ IV = op->sym->cipher.iv.data;
+ length_in_bits = op->sym->cipher.data.length;
+
+ sso_snow3g_f8_1_buffer_bit(&session->pKeySched_cipher, IV,
+ src, dst, length_in_bits, offset_in_bits);
+
+ return 1;
+}
+
+/** Generate/verify hash from mbufs with same hash key. */
+static int
+process_snow3g_hash_op(struct rte_crypto_op **ops,
+ struct snow3g_session *session,
+ uint8_t num_ops)
+{
+ unsigned i;
+ uint8_t processed_ops = 0;
+ uint8_t *src, *dst;
+ uint32_t length_in_bits;
+
+ for (i = 0; i < num_ops; i++) {
+ if (unlikely(ops[i]->sym->auth.aad.length != SNOW3G_IV_LENGTH)) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ SNOW3G_LOG_ERR("aad");
+ break;
+ }
+
+ if (unlikely(ops[i]->sym->auth.digest.length != SNOW3G_DIGEST_LENGTH)) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ SNOW3G_LOG_ERR("digest");
+ break;
+ }
+
+ /* Data must be byte aligned */
+ if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ SNOW3G_LOG_ERR("Offset");
+ break;
+ }
+
+ length_in_bits = ops[i]->sym->auth.data.length;
+
+ src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
+ (ops[i]->sym->auth.data.offset >> 3);
+
+ if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
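+			/*
+			 * Append a scratch area to the source mbuf for the
+			 * computed digest, compare it with the supplied digest,
+			 * then trim the scratch area off again below.
+			 */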
+ dst = (uint8_t *)rte_pktmbuf_append(ops[i]->sym->m_src,
+ ops[i]->sym->auth.digest.length);
+
+ sso_snow3g_f9_1_buffer(&session->pKeySched_hash,
+ ops[i]->sym->auth.aad.data, src,
+ length_in_bits, dst);
+ /* Verify digest. */
+ if (memcmp(dst, ops[i]->sym->auth.digest.data,
+ ops[i]->sym->auth.digest.length) != 0)
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+
+ /* Trim area used for digest from mbuf. */
+ rte_pktmbuf_trim(ops[i]->sym->m_src,
+ ops[i]->sym->auth.digest.length);
+ } else {
+ dst = ops[i]->sym->auth.digest.data;
+
+ sso_snow3g_f9_1_buffer(&session->pKeySched_hash,
+ ops[i]->sym->auth.aad.data, src,
+ length_in_bits, dst);
+ }
+ processed_ops++;
+ }
+
+ return processed_ops;
+}
+
+/** Process a batch of crypto ops which share the same session. */
+static int
+process_ops(struct rte_crypto_op **ops, struct snow3g_session *session,
+ struct snow3g_qp *qp, uint8_t num_ops,
+ uint16_t *accumulated_enqueued_ops)
+{
+ unsigned i;
+ unsigned enqueued_ops, processed_ops;
+
+#ifdef RTE_LIBRTE_PMD_SNOW3G_DEBUG
+ for (i = 0; i < num_ops; i++) {
+ if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
+ (ops[i]->sym->m_dst != NULL &&
+ !rte_pktmbuf_is_contiguous(
+ ops[i]->sym->m_dst))) {
+ SNOW3G_LOG_ERR("PMD supports only contiguous mbufs, "
+ "op (%p) provides noncontiguous mbuf as "
+ "source/destination buffer.\n", ops[i]);
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return 0;
+ }
+ }
+#endif
+
+ switch (session->op) {
+ case SNOW3G_OP_ONLY_CIPHER:
+ processed_ops = process_snow3g_cipher_op(ops,
+ session, num_ops);
+ break;
+ case SNOW3G_OP_ONLY_AUTH:
+ processed_ops = process_snow3g_hash_op(ops, session,
+ num_ops);
+ break;
+ case SNOW3G_OP_CIPHER_AUTH:
+ processed_ops = process_snow3g_cipher_op(ops, session,
+ num_ops);
+ process_snow3g_hash_op(ops, session, processed_ops);
+ break;
+ case SNOW3G_OP_AUTH_CIPHER:
+ processed_ops = process_snow3g_hash_op(ops, session,
+ num_ops);
+ process_snow3g_cipher_op(ops, session, processed_ops);
+ break;
+ default:
+ /* Operation not supported. */
+ processed_ops = 0;
+ }
+
+ for (i = 0; i < num_ops; i++) {
+ /*
+ * If there was no error/authentication failure,
+ * change status to successful.
+ */
+ if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ /* Free session if a session-less crypto op. */
+ if (ops[i]->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+ rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
+ ops[i]->sym->session = NULL;
+ }
+ }
+
+ enqueued_ops = rte_ring_enqueue_burst(qp->processed_ops,
+ (void **)ops, processed_ops, NULL);
+ qp->qp_stats.enqueued_count += enqueued_ops;
+ *accumulated_enqueued_ops += enqueued_ops;
+
+ return enqueued_ops;
+}
+
+/** Process a crypto op with length/offset in bits. */
+static int
+process_op_bit(struct rte_crypto_op *op, struct snow3g_session *session,
+ struct snow3g_qp *qp, uint16_t *accumulated_enqueued_ops)
+{
+ unsigned enqueued_op, processed_op;
+
+ switch (session->op) {
+ case SNOW3G_OP_ONLY_CIPHER:
+ processed_op = process_snow3g_cipher_op_bit(op,
+ session);
+ break;
+ case SNOW3G_OP_ONLY_AUTH:
+ processed_op = process_snow3g_hash_op(&op, session, 1);
+ break;
+ case SNOW3G_OP_CIPHER_AUTH:
+ processed_op = process_snow3g_cipher_op_bit(op, session);
+ if (processed_op == 1)
+ process_snow3g_hash_op(&op, session, 1);
+ break;
+ case SNOW3G_OP_AUTH_CIPHER:
+ processed_op = process_snow3g_hash_op(&op, session, 1);
+ if (processed_op == 1)
+ process_snow3g_cipher_op_bit(op, session);
+ break;
+ default:
+ /* Operation not supported. */
+ processed_op = 0;
+ }
+
+ /*
+ * If there was no error/authentication failure,
+ * change status to successful.
+ */
+ if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+ /* Free session if a session-less crypto op. */
+ if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+ rte_mempool_put(qp->sess_mp, op->sym->session);
+ op->sym->session = NULL;
+ }
+
+ enqueued_op = rte_ring_enqueue_burst(qp->processed_ops,
+ (void **)&op, processed_op, NULL);
+ qp->qp_stats.enqueued_count += enqueued_op;
+ *accumulated_enqueued_ops += enqueued_op;
+
+ return enqueued_op;
+}
+
+static uint16_t
+snow3g_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_crypto_op *c_ops[SNOW3G_MAX_BURST];
+ struct rte_crypto_op *curr_c_op;
+
+ struct snow3g_session *prev_sess = NULL, *curr_sess = NULL;
+ struct snow3g_qp *qp = queue_pair;
+ unsigned i;
+ uint8_t burst_size = 0;
+ uint16_t enqueued_ops = 0;
+ uint8_t processed_ops;
+
+ for (i = 0; i < nb_ops; i++) {
+ curr_c_op = ops[i];
+
+ /* Set status as enqueued (not processed yet) by default. */
+ curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+ curr_sess = snow3g_get_session(qp, curr_c_op);
+ if (unlikely(curr_sess == NULL ||
+ curr_sess->op == SNOW3G_OP_NOT_SUPPORTED)) {
+ curr_c_op->status =
+ RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ break;
+ }
+
+ /* If length/offset is at bit-level, process this buffer alone. */
+ if (((curr_c_op->sym->cipher.data.length % BYTE_LEN) != 0)
+ || ((curr_c_op->sym->cipher.data.offset
+ % BYTE_LEN) != 0)) {
+ /* Process the ops of the previous session. */
+ if (prev_sess != NULL) {
+ processed_ops = process_ops(c_ops, prev_sess,
+ qp, burst_size, &enqueued_ops);
+ if (processed_ops < burst_size) {
+ burst_size = 0;
+ break;
+ }
+
+ burst_size = 0;
+ prev_sess = NULL;
+ }
+
+ processed_ops = process_op_bit(curr_c_op, curr_sess,
+ qp, &enqueued_ops);
+ if (processed_ops != 1)
+ break;
+
+ continue;
+ }
+
+ /* Batch ops that share the same session. */
+ if (prev_sess == NULL) {
+ prev_sess = curr_sess;
+ c_ops[burst_size++] = curr_c_op;
+ } else if (curr_sess == prev_sess) {
+ c_ops[burst_size++] = curr_c_op;
+ /*
+ * When there are enough ops to process in a batch,
+ * process them, and start a new batch.
+ */
+ if (burst_size == SNOW3G_MAX_BURST) {
+ processed_ops = process_ops(c_ops, prev_sess,
+ qp, burst_size, &enqueued_ops);
+ if (processed_ops < burst_size) {
+ burst_size = 0;
+ break;
+ }
+
+ burst_size = 0;
+ prev_sess = NULL;
+ }
+ } else {
+ /*
+ * Different session, process the ops
+ * of the previous session.
+ */
+ processed_ops = process_ops(c_ops, prev_sess,
+ qp, burst_size, &enqueued_ops);
+ if (processed_ops < burst_size) {
+ burst_size = 0;
+ break;
+ }
+
+ burst_size = 0;
+ prev_sess = curr_sess;
+
+ c_ops[burst_size++] = curr_c_op;
+ }
+ }
+
+ if (burst_size != 0) {
+ /* Process the crypto ops of the last session. */
+ processed_ops = process_ops(c_ops, prev_sess,
+ qp, burst_size, &enqueued_ops);
+ }
+
+ qp->qp_stats.enqueue_err_count += nb_ops - enqueued_ops;
+ return enqueued_ops;
+}
+
+static uint16_t
+snow3g_pmd_dequeue_burst(void *queue_pair,
+ struct rte_crypto_op **c_ops, uint16_t nb_ops)
+{
+ struct snow3g_qp *qp = queue_pair;
+
+ unsigned nb_dequeued;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
+ (void **)c_ops, nb_ops, NULL);
+ qp->qp_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
+
+static int cryptodev_snow3g_remove(struct rte_vdev_device *vdev);
+
+static int
+cryptodev_snow3g_create(const char *name,
+ struct rte_vdev_device *vdev,
+ struct rte_crypto_vdev_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ struct snow3g_private *internals;
+ uint64_t cpu_flags = 0;
+
+ if (init_params->name[0] == '\0')
+ snprintf(init_params->name, sizeof(init_params->name),
+ "%s", name);
+
+ /* Check CPU for supported vector instruction set */
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
+ cpu_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
+ else {
+ SNOW3G_LOG_ERR("Vector instructions are not supported by CPU");
+ return -EFAULT;
+ }
+
+ dev = rte_cryptodev_pmd_virtual_dev_init(init_params->name,
+ sizeof(struct snow3g_private), init_params->socket_id);
+ if (dev == NULL) {
+ SNOW3G_LOG_ERR("failed to create cryptodev vdev");
+ goto init_error;
+ }
+
+ dev->dev_type = RTE_CRYPTODEV_SNOW3G_PMD;
+ dev->dev_ops = rte_snow3g_pmd_ops;
+
+ /* Register RX/TX burst functions for data path. */
+ dev->dequeue_burst = snow3g_pmd_dequeue_burst;
+ dev->enqueue_burst = snow3g_pmd_enqueue_burst;
+
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ cpu_flags;
+
+ internals = dev->data->dev_private;
+
+ internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
+ internals->max_nb_sessions = init_params->max_nb_sessions;
+
+ return 0;
+init_error:
+ SNOW3G_LOG_ERR("driver %s: cryptodev_snow3g_create failed",
+ init_params->name);
+
+ cryptodev_snow3g_remove(vdev);
+ return -EFAULT;
+}
+
+static int
+cryptodev_snow3g_probe(struct rte_vdev_device *vdev)
+{
+ struct rte_crypto_vdev_init_params init_params = {
+ RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
+ RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+ rte_socket_id(),
+ {0}
+ };
+ const char *name;
+ const char *input_args;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+ input_args = rte_vdev_device_args(vdev);
+
+ rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
+
+ RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
+ init_params.socket_id);
+ if (init_params.name[0] != '\0')
+ RTE_LOG(INFO, PMD, " User defined name = %s\n",
+ init_params.name);
+ RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
+ init_params.max_nb_queue_pairs);
+ RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
+ init_params.max_nb_sessions);
+
+ return cryptodev_snow3g_create(name, vdev, &init_params);
+}
+
+static int
+cryptodev_snow3g_remove(struct rte_vdev_device *vdev)
+{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ RTE_LOG(INFO, PMD, "Closing SNOW 3G crypto device %s"
+ " on numa socket %u\n",
+ name, rte_socket_id());
+
+ return 0;
+}
+
+static struct rte_vdev_driver cryptodev_snow3g_pmd_drv = {
+ .probe = cryptodev_snow3g_probe,
+ .remove = cryptodev_snow3g_remove
+};
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_SNOW3G_PMD, cryptodev_snow3g_pmd_drv);
+RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_SNOW3G_PMD, cryptodev_snow3g_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SNOW3G_PMD,
+ "max_nb_queue_pairs=<int> "
+ "max_nb_sessions=<int> "
+ "socket_id=<int>");
diff --git a/src/seastar/dpdk/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c b/src/seastar/dpdk/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c
new file mode 100644
index 00000000..7ce96be9
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c
@@ -0,0 +1,343 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "rte_snow3g_pmd_private.h"
+
+static const struct rte_cryptodev_capabilities snow3g_pmd_capabilities[] = {
+ { /* SNOW 3G (UIA2) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 4,
+ .max = 4,
+ .increment = 0
+ },
+ .aad_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* SNOW 3G (UEA2) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+/** Configure device */
+static int
+snow3g_pmd_config(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused struct rte_cryptodev_config *config)
+{
+ return 0;
+}
+
+/** Start device */
+static int
+snow3g_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+snow3g_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+static int
+snow3g_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+
+/** Get device statistics */
+static void
+snow3g_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct snow3g_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->qp_stats.enqueued_count;
+ stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+snow3g_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct snow3g_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ }
+}
+
+
+/** Get device info */
+static void
+snow3g_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct snow3g_private *internals = dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->dev_type = dev->dev_type;
+ dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
+ dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = snow3g_pmd_capabilities;
+ }
+}
+
+/** Release queue pair */
+static int
+snow3g_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ if (dev->data->queue_pairs[qp_id] != NULL) {
+ rte_free(dev->data->queue_pairs[qp_id]);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+ return 0;
+}
+
+/** Set a unique name for the queue pair based on the device id and queue pair id */
+static int
+snow3g_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+ struct snow3g_qp *qp)
+{
+ unsigned n = snprintf(qp->name, sizeof(qp->name),
+ "snow3g_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+ if (n >= sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+/** Create a ring to place processed ops on */
+static struct rte_ring *
+snow3g_pmd_qp_create_processed_ops_ring(struct snow3g_qp *qp,
+ unsigned ring_size, int socket_id)
+{
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(qp->name);
+ if (r) {
+ if (rte_ring_get_size(r) >= ring_size) {
+ SNOW3G_LOG_INFO("Reusing existing ring %s"
+ " for processed packets",
+ qp->name);
+ return r;
+ }
+
+ SNOW3G_LOG_ERR("Unable to reuse existing ring %s"
+ " for processed packets",
+ qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+/** Setup a queue pair */
+static int
+snow3g_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id)
+{
+ struct snow3g_qp *qp = NULL;
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ snow3g_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("SNOW 3G PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL)
+ return -ENOMEM;
+
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ if (snow3g_pmd_qp_set_unique_name(dev, qp))
+ goto qp_setup_cleanup;
+
+ qp->processed_ops = snow3g_pmd_qp_create_processed_ops_ring(qp,
+ qp_conf->nb_descriptors, socket_id);
+ if (qp->processed_ops == NULL)
+ goto qp_setup_cleanup;
+
+ qp->sess_mp = dev->data->session_pool;
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+
+ return 0;
+
+qp_setup_cleanup:
+ if (qp)
+ rte_free(qp);
+
+ return -1;
+}
+
+/** Start queue pair */
+static int
+snow3g_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ return -ENOTSUP;
+}
+
+/** Stop queue pair */
+static int
+snow3g_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ return -ENOTSUP;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+snow3g_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the SNOW 3G session structure */
+static unsigned
+snow3g_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct snow3g_session);
+}
+
+/** Configure a SNOW 3G session from a crypto xform chain */
+static void *
+snow3g_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_sym_xform *xform, void *sess)
+{
+ if (unlikely(sess == NULL)) {
+ SNOW3G_LOG_ERR("invalid session struct");
+ return NULL;
+ }
+
+ if (snow3g_set_session_parameters(sess, xform) != 0) {
+ SNOW3G_LOG_ERR("failed configure session parameters");
+ return NULL;
+ }
+
+ return sess;
+}
+
+/** Clear the session memory so it doesn't leave key material behind */
+static void
+snow3g_pmd_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
+{
+ /*
+ * Currently this resets the whole data structure; it needs to be
+ * investigated whether a more selective reset of the key material
+ * would be more performant.
+ */
+ if (sess)
+ memset(sess, 0, sizeof(struct snow3g_session));
+}
+
+struct rte_cryptodev_ops snow3g_pmd_ops = {
+ .dev_configure = snow3g_pmd_config,
+ .dev_start = snow3g_pmd_start,
+ .dev_stop = snow3g_pmd_stop,
+ .dev_close = snow3g_pmd_close,
+
+ .stats_get = snow3g_pmd_stats_get,
+ .stats_reset = snow3g_pmd_stats_reset,
+
+ .dev_infos_get = snow3g_pmd_info_get,
+
+ .queue_pair_setup = snow3g_pmd_qp_setup,
+ .queue_pair_release = snow3g_pmd_qp_release,
+ .queue_pair_start = snow3g_pmd_qp_start,
+ .queue_pair_stop = snow3g_pmd_qp_stop,
+ .queue_pair_count = snow3g_pmd_qp_count,
+
+ .session_get_size = snow3g_pmd_session_get_size,
+ .session_configure = snow3g_pmd_session_configure,
+ .session_clear = snow3g_pmd_session_clear
+};
+
+struct rte_cryptodev_ops *rte_snow3g_pmd_ops = &snow3g_pmd_ops;
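
The capability table above advertises fixed 16-byte keys, a 4-byte digest and a 16-byte AAD (used as the authentication IV, see process_snow3g_hash_op() earlier in this patch) for UIA2. As an illustrative sketch only — dev_id is a hypothetical device id, not part of this patch — an application can read these limits back through rte_cryptodev_info_get():

    struct rte_cryptodev_info info;
    const struct rte_cryptodev_capabilities *cap;

    rte_cryptodev_info_get(dev_id, &info);
    for (cap = info.capabilities;
         cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; cap++) {
            if (cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AUTH &&
                cap->sym.auth.algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
                    /* key 16 B, digest 4 B, AAD 16 B per the table above */
                    break;
            }
    }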
diff --git a/src/seastar/dpdk/drivers/crypto/snow3g/rte_snow3g_pmd_private.h b/src/seastar/dpdk/drivers/crypto/snow3g/rte_snow3g_pmd_private.h
new file mode 100644
index 00000000..03973b97
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/snow3g/rte_snow3g_pmd_private.h
@@ -0,0 +1,107 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_SNOW3G_PMD_PRIVATE_H_
+#define _RTE_SNOW3G_PMD_PRIVATE_H_
+
+#include <sso_snow3g.h>
+
+#define SNOW3G_LOG_ERR(fmt, args...) \
+ RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD), \
+ __func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_SNOW3G_DEBUG
+#define SNOW3G_LOG_INFO(fmt, args...) \
+ RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD), \
+ __func__, __LINE__, ## args)
+
+#define SNOW3G_LOG_DBG(fmt, args...) \
+ RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD), \
+ __func__, __LINE__, ## args)
+#else
+#define SNOW3G_LOG_INFO(fmt, args...)
+#define SNOW3G_LOG_DBG(fmt, args...)
+#endif
+
+/** private data structure for each virtual SNOW 3G device */
+struct snow3g_private {
+ unsigned max_nb_queue_pairs;
+ /**< Max number of queue pairs supported by device */
+ unsigned max_nb_sessions;
+ /**< Max number of sessions supported by device */
+};
+
+/** SNOW 3G buffer queue pair */
+struct snow3g_qp {
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_CRYPTODEV_NAME_LEN];
+ /**< Unique Queue Pair Name */
+ struct rte_ring *processed_ops;
+ /**< Ring for placing processed ops */
+ struct rte_mempool *sess_mp;
+ /**< Session Mempool */
+ struct rte_cryptodev_stats qp_stats;
+ /**< Queue pair statistics */
+} __rte_cache_aligned;
+
+enum snow3g_operation {
+ SNOW3G_OP_ONLY_CIPHER,
+ SNOW3G_OP_ONLY_AUTH,
+ SNOW3G_OP_CIPHER_AUTH,
+ SNOW3G_OP_AUTH_CIPHER,
+ SNOW3G_OP_NOT_SUPPORTED
+};
+
+/** SNOW 3G private session structure */
+struct snow3g_session {
+ enum snow3g_operation op;
+ enum rte_crypto_auth_operation auth_op;
+ sso_snow3g_key_schedule_t pKeySched_cipher;
+ sso_snow3g_key_schedule_t pKeySched_hash;
+} __rte_cache_aligned;
+
+
+extern int
+snow3g_set_session_parameters(struct snow3g_session *sess,
+ const struct rte_crypto_sym_xform *xform);
+
+
+/** device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_snow3g_pmd_ops;
+
+
+
+#endif /* _RTE_SNOW3G_PMD_PRIVATE_H_ */
diff --git a/src/seastar/dpdk/drivers/crypto/zuc/Makefile b/src/seastar/dpdk/drivers/crypto/zuc/Makefile
new file mode 100644
index 00000000..f543b407
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/zuc/Makefile
@@ -0,0 +1,62 @@
+# BSD LICENSE
+#
+# Copyright(c) 2016 Intel Corporation. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifneq ($(MAKECMDGOALS),clean)
+ifeq ($(LIBSSO_ZUC_PATH),)
+$(error "Please define LIBSSO_ZUC_PATH environment variable")
+endif
+endif
+
+# library name
+LIB = librte_pmd_zuc.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_zuc_version.map
+
+# external library dependencies
+CFLAGS += -I$(LIBSSO_ZUC_PATH)
+CFLAGS += -I$(LIBSSO_ZUC_PATH)/include
+CFLAGS += -I$(LIBSSO_ZUC_PATH)/build
+LDLIBS += -L$(LIBSSO_ZUC_PATH)/build -lsso_zuc
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += rte_zuc_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += rte_zuc_pmd_ops.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
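
The LIBSSO_ZUC_PATH check above makes the external libsso_zuc library a hard build dependency. A typical build sequence — the install path is only an example — is:

    export LIBSSO_ZUC_PATH=/opt/libsso_zuc
    make    # with CONFIG_RTE_LIBRTE_PMD_ZUC=y enabled in the DPDK config

The -I/-L/-lsso_zuc flags above then pick up the headers and the library from that location.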
diff --git a/src/seastar/dpdk/drivers/crypto/zuc/rte_pmd_zuc_version.map b/src/seastar/dpdk/drivers/crypto/zuc/rte_pmd_zuc_version.map
new file mode 100644
index 00000000..cc5829e3
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/zuc/rte_pmd_zuc_version.map
@@ -0,0 +1,3 @@
+DPDK_16.11 {
+ local: *;
+};
diff --git a/src/seastar/dpdk/drivers/crypto/zuc/rte_zuc_pmd.c b/src/seastar/dpdk/drivers/crypto/zuc/rte_zuc_pmd.c
new file mode 100644
index 00000000..1020544b
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/zuc/rte_zuc_pmd.c
@@ -0,0 +1,554 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_common.h>
+#include <rte_config.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_vdev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+
+#include "rte_zuc_pmd_private.h"
+
+#define ZUC_DIGEST_LENGTH 4
+#define ZUC_MAX_BURST 8
+#define BYTE_LEN 8
+
+/** Get xform chain order. */
+static enum zuc_operation
+zuc_get_mode(const struct rte_crypto_sym_xform *xform)
+{
+ if (xform == NULL)
+ return ZUC_OP_NOT_SUPPORTED;
+
+ if (xform->next != NULL && xform->next->next != NULL)
+ return ZUC_OP_NOT_SUPPORTED;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (xform->next == NULL)
+ return ZUC_OP_ONLY_AUTH;
+ else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return ZUC_OP_AUTH_CIPHER;
+ else
+ return ZUC_OP_NOT_SUPPORTED;
+ }
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ if (xform->next == NULL)
+ return ZUC_OP_ONLY_CIPHER;
+ else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return ZUC_OP_CIPHER_AUTH;
+ else
+ return ZUC_OP_NOT_SUPPORTED;
+ }
+
+ return ZUC_OP_NOT_SUPPORTED;
+}
+
+
+/** Parse crypto xform chain and set private session parameters. */
+int
+zuc_set_session_parameters(struct zuc_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
+ enum zuc_operation mode;
+
+ /* Select Crypto operation - hash then cipher / cipher then hash */
+ mode = zuc_get_mode(xform);
+
+ switch (mode) {
+ case ZUC_OP_CIPHER_AUTH:
+ auth_xform = xform->next;
+
+ /* Fall-through */
+ case ZUC_OP_ONLY_CIPHER:
+ cipher_xform = xform;
+ break;
+ case ZUC_OP_AUTH_CIPHER:
+ cipher_xform = xform->next;
+ /* Fall-through */
+ case ZUC_OP_ONLY_AUTH:
+ auth_xform = xform;
+ break;
+ case ZUC_OP_NOT_SUPPORTED:
+ default:
+ ZUC_LOG_ERR("Unsupported operation chain order parameter");
+ return -EINVAL;
+ }
+
+ if (cipher_xform) {
+ /* Only ZUC EEA3 supported */
+ if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_ZUC_EEA3)
+ return -EINVAL;
+ /* Copy the key */
+ memcpy(sess->pKey_cipher, cipher_xform->cipher.key.data,
+ ZUC_IV_KEY_LENGTH);
+ }
+
+ if (auth_xform) {
+ /* Only ZUC EIA3 supported */
+ if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_ZUC_EIA3)
+ return -EINVAL;
+ sess->auth_op = auth_xform->auth.op;
+ /* Copy the key */
+ memcpy(sess->pKey_hash, auth_xform->auth.key.data,
+ ZUC_IV_KEY_LENGTH);
+ }
+
+
+ sess->op = mode;
+
+ return 0;
+}
+
+/** Get ZUC session. */
+static struct zuc_session *
+zuc_get_session(struct zuc_qp *qp, struct rte_crypto_op *op)
+{
+ struct zuc_session *sess;
+
+ if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
+ if (unlikely(op->sym->session->dev_type !=
+ RTE_CRYPTODEV_ZUC_PMD))
+ return NULL;
+
+ sess = (struct zuc_session *)op->sym->session->_private;
+ } else {
+ struct rte_cryptodev_session *c_sess = NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&c_sess))
+ return NULL;
+
+ sess = (struct zuc_session *)c_sess->_private;
+
+ if (unlikely(zuc_set_session_parameters(sess,
+ op->sym->xform) != 0))
+ return NULL;
+ }
+
+ return sess;
+}
+
+/** Encrypt/decrypt mbufs with the same cipher key. */
+static uint8_t
+process_zuc_cipher_op(struct rte_crypto_op **ops,
+ struct zuc_session *session,
+ uint8_t num_ops)
+{
+ unsigned i;
+ uint8_t processed_ops = 0;
+ uint8_t *src[ZUC_MAX_BURST], *dst[ZUC_MAX_BURST];
+ uint8_t *IV[ZUC_MAX_BURST];
+ uint32_t num_bytes[ZUC_MAX_BURST];
+ uint8_t *cipher_keys[ZUC_MAX_BURST];
+
+ for (i = 0; i < num_ops; i++) {
+ /* Sanity checks. */
+ if (unlikely(ops[i]->sym->cipher.iv.length != ZUC_IV_KEY_LENGTH)) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ ZUC_LOG_ERR("iv");
+ break;
+ }
+
+ if (((ops[i]->sym->cipher.data.length % BYTE_LEN) != 0)
+ || ((ops[i]->sym->cipher.data.offset
+ % BYTE_LEN) != 0)) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ ZUC_LOG_ERR("Data Length or offset");
+ break;
+ }
+
+#ifdef RTE_LIBRTE_PMD_ZUC_DEBUG
+ if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
+ (ops[i]->sym->m_dst != NULL &&
+ !rte_pktmbuf_is_contiguous(
+ ops[i]->sym->m_dst))) {
+ ZUC_LOG_ERR("PMD supports only contiguous mbufs, "
+ "op (%p) provides noncontiguous mbuf as "
+ "source/destination buffer.\n", ops[i]);
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ break;
+ }
+#endif
+
+ src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
+ (ops[i]->sym->cipher.data.offset >> 3);
+ dst[i] = ops[i]->sym->m_dst ?
+ rte_pktmbuf_mtod(ops[i]->sym->m_dst, uint8_t *) +
+ (ops[i]->sym->cipher.data.offset >> 3) :
+ rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
+ (ops[i]->sym->cipher.data.offset >> 3);
+ IV[i] = ops[i]->sym->cipher.iv.data;
+ num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
+
+ cipher_keys[i] = session->pKey_cipher;
+
+ processed_ops++;
+ }
+
+ sso_zuc_eea3_n_buffer(cipher_keys, IV, src, dst,
+ num_bytes, processed_ops);
+
+ return processed_ops;
+}
+
+/** Generate/verify hash from mbufs with the same hash key. */
+static int
+process_zuc_hash_op(struct rte_crypto_op **ops,
+ struct zuc_session *session,
+ uint8_t num_ops)
+{
+ unsigned i;
+ uint8_t processed_ops = 0;
+ uint8_t *src;
+ uint32_t *dst;
+ uint32_t length_in_bits;
+
+ for (i = 0; i < num_ops; i++) {
+ if (unlikely(ops[i]->sym->auth.aad.length != ZUC_IV_KEY_LENGTH)) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ ZUC_LOG_ERR("aad");
+ break;
+ }
+
+ if (unlikely(ops[i]->sym->auth.digest.length != ZUC_DIGEST_LENGTH)) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ ZUC_LOG_ERR("digest");
+ break;
+ }
+
+ /* Data must be byte aligned */
+ if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ ZUC_LOG_ERR("Offset");
+ break;
+ }
+
+ length_in_bits = ops[i]->sym->auth.data.length;
+
+ src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
+ (ops[i]->sym->auth.data.offset >> 3);
+
+ if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
+ dst = (uint32_t *)rte_pktmbuf_append(ops[i]->sym->m_src,
+ ops[i]->sym->auth.digest.length);
+
+ sso_zuc_eia3_1_buffer(session->pKey_hash,
+ ops[i]->sym->auth.aad.data, src,
+ length_in_bits, dst);
+ /* Verify digest. */
+ if (memcmp(dst, ops[i]->sym->auth.digest.data,
+ ops[i]->sym->auth.digest.length) != 0)
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+
+ /* Trim area used for digest from mbuf. */
+ rte_pktmbuf_trim(ops[i]->sym->m_src,
+ ops[i]->sym->auth.digest.length);
+ } else {
+ dst = (uint32_t *)ops[i]->sym->auth.digest.data;
+
+ sso_zuc_eia3_1_buffer(session->pKey_hash,
+ ops[i]->sym->auth.aad.data, src,
+ length_in_bits, dst);
+ }
+ processed_ops++;
+ }
+
+ return processed_ops;
+}
+
+/** Process a batch of crypto ops which share the same session. */
+static int
+process_ops(struct rte_crypto_op **ops, struct zuc_session *session,
+ struct zuc_qp *qp, uint8_t num_ops,
+ uint16_t *accumulated_enqueued_ops)
+{
+ unsigned i;
+ unsigned enqueued_ops, processed_ops;
+
+ switch (session->op) {
+ case ZUC_OP_ONLY_CIPHER:
+ processed_ops = process_zuc_cipher_op(ops,
+ session, num_ops);
+ break;
+ case ZUC_OP_ONLY_AUTH:
+ processed_ops = process_zuc_hash_op(ops, session,
+ num_ops);
+ break;
+ case ZUC_OP_CIPHER_AUTH:
+ processed_ops = process_zuc_cipher_op(ops, session,
+ num_ops);
+ process_zuc_hash_op(ops, session, processed_ops);
+ break;
+ case ZUC_OP_AUTH_CIPHER:
+ processed_ops = process_zuc_hash_op(ops, session,
+ num_ops);
+ process_zuc_cipher_op(ops, session, processed_ops);
+ break;
+ default:
+ /* Operation not supported. */
+ processed_ops = 0;
+ }
+
+ for (i = 0; i < num_ops; i++) {
+ /*
+ * If there was no error/authentication failure,
+ * change status to successful.
+ */
+ if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ /* Free session if a session-less crypto op. */
+ if (ops[i]->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+ rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
+ ops[i]->sym->session = NULL;
+ }
+ }
+
+ enqueued_ops = rte_ring_enqueue_burst(qp->processed_ops,
+ (void **)ops, processed_ops, NULL);
+ qp->qp_stats.enqueued_count += enqueued_ops;
+ *accumulated_enqueued_ops += enqueued_ops;
+
+ return enqueued_ops;
+}
+
+static uint16_t
+zuc_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_crypto_op *c_ops[ZUC_MAX_BURST];
+ struct rte_crypto_op *curr_c_op;
+
+ struct zuc_session *prev_sess = NULL, *curr_sess = NULL;
+ struct zuc_qp *qp = queue_pair;
+ unsigned i;
+ uint8_t burst_size = 0;
+ uint16_t enqueued_ops = 0;
+ uint8_t processed_ops;
+
+ for (i = 0; i < nb_ops; i++) {
+ curr_c_op = ops[i];
+
+ /* Set status as enqueued (not processed yet) by default. */
+ curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+ curr_sess = zuc_get_session(qp, curr_c_op);
+ if (unlikely(curr_sess == NULL ||
+ curr_sess->op == ZUC_OP_NOT_SUPPORTED)) {
+ curr_c_op->status =
+ RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ break;
+ }
+
+ /* Batch ops that share the same session. */
+ if (prev_sess == NULL) {
+ prev_sess = curr_sess;
+ c_ops[burst_size++] = curr_c_op;
+ } else if (curr_sess == prev_sess) {
+ c_ops[burst_size++] = curr_c_op;
+ /*
+ * When there are enough ops to process in a batch,
+ * process them, and start a new batch.
+ */
+ if (burst_size == ZUC_MAX_BURST) {
+ processed_ops = process_ops(c_ops, prev_sess,
+ qp, burst_size, &enqueued_ops);
+ if (processed_ops < burst_size) {
+ burst_size = 0;
+ break;
+ }
+
+ burst_size = 0;
+ prev_sess = NULL;
+ }
+ } else {
+ /*
+ * Different session, process the ops
+ * of the previous session.
+ */
+ processed_ops = process_ops(c_ops, prev_sess,
+ qp, burst_size, &enqueued_ops);
+ if (processed_ops < burst_size) {
+ burst_size = 0;
+ break;
+ }
+
+ burst_size = 0;
+ prev_sess = curr_sess;
+
+ c_ops[burst_size++] = curr_c_op;
+ }
+ }
+
+ if (burst_size != 0) {
+ /* Process the crypto ops of the last session. */
+ processed_ops = process_ops(c_ops, prev_sess,
+ qp, burst_size, &enqueued_ops);
+ }
+
+ qp->qp_stats.enqueue_err_count += nb_ops - enqueued_ops;
+ return enqueued_ops;
+}
+
+static uint16_t
+zuc_pmd_dequeue_burst(void *queue_pair,
+ struct rte_crypto_op **c_ops, uint16_t nb_ops)
+{
+ struct zuc_qp *qp = queue_pair;
+
+ unsigned nb_dequeued;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
+ (void **)c_ops, nb_ops, NULL);
+ qp->qp_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
+
+static int cryptodev_zuc_remove(struct rte_vdev_device *vdev);
+
+static int
+cryptodev_zuc_create(const char *name,
+ struct rte_vdev_device *vdev,
+ struct rte_crypto_vdev_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ struct zuc_private *internals;
+ uint64_t cpu_flags = 0;
+
+ if (init_params->name[0] == '\0')
+ snprintf(init_params->name, sizeof(init_params->name),
+ "%s", name);
+
+ /* Check CPU for supported vector instruction set */
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
+ cpu_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
+ else {
+ ZUC_LOG_ERR("Vector instructions are not supported by CPU");
+ return -EFAULT;
+ }
+
+ dev = rte_cryptodev_pmd_virtual_dev_init(init_params->name,
+ sizeof(struct zuc_private), init_params->socket_id);
+ if (dev == NULL) {
+ ZUC_LOG_ERR("failed to create cryptodev vdev");
+ goto init_error;
+ }
+
+ dev->dev_type = RTE_CRYPTODEV_ZUC_PMD;
+ dev->dev_ops = rte_zuc_pmd_ops;
+
+ /* Register RX/TX burst functions for data path. */
+ dev->dequeue_burst = zuc_pmd_dequeue_burst;
+ dev->enqueue_burst = zuc_pmd_enqueue_burst;
+
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ cpu_flags;
+
+ internals = dev->data->dev_private;
+
+ internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
+ internals->max_nb_sessions = init_params->max_nb_sessions;
+
+ return 0;
+init_error:
+ ZUC_LOG_ERR("driver %s: cryptodev_zuc_create failed",
+ init_params->name);
+
+ cryptodev_zuc_remove(vdev);
+ return -EFAULT;
+}
+
+static int
+cryptodev_zuc_probe(struct rte_vdev_device *vdev)
+{
+ struct rte_crypto_vdev_init_params init_params = {
+ RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
+ RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+ rte_socket_id(),
+ {0}
+ };
+ const char *name;
+ const char *input_args;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+ input_args = rte_vdev_device_args(vdev);
+
+ rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
+
+ RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
+ init_params.socket_id);
+ if (init_params.name[0] != '\0')
+ RTE_LOG(INFO, PMD, " User defined name = %s\n",
+ init_params.name);
+ RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
+ init_params.max_nb_queue_pairs);
+ RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
+ init_params.max_nb_sessions);
+
+ return cryptodev_zuc_create(name, vdev, &init_params);
+}
+
+static int
+cryptodev_zuc_remove(struct rte_vdev_device *vdev)
+{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ RTE_LOG(INFO, PMD, "Closing ZUC crypto device %s"
+ " on numa socket %u\n",
+ name, rte_socket_id());
+
+ return 0;
+}
+
+static struct rte_vdev_driver cryptodev_zuc_pmd_drv = {
+ .probe = cryptodev_zuc_probe,
+ .remove = cryptodev_zuc_remove
+};
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_ZUC_PMD, cryptodev_zuc_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_ZUC_PMD,
+ "max_nb_queue_pairs=<int> "
+ "max_nb_sessions=<int> "
+ "socket_id=<int>");
diff --git a/src/seastar/dpdk/drivers/crypto/zuc/rte_zuc_pmd_ops.c b/src/seastar/dpdk/drivers/crypto/zuc/rte_zuc_pmd_ops.c
new file mode 100644
index 00000000..e793459c
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/zuc/rte_zuc_pmd_ops.c
@@ -0,0 +1,343 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "rte_zuc_pmd_private.h"
+
+static const struct rte_cryptodev_capabilities zuc_pmd_capabilities[] = {
+ { /* ZUC (EIA3) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_ZUC_EIA3,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 4,
+ .max = 4,
+ .increment = 0
+ },
+ .aad_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* ZUC (EEA3) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_ZUC_EEA3,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+/** Configure device */
+static int
+zuc_pmd_config(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused struct rte_cryptodev_config *config)
+{
+ return 0;
+}
+
+/** Start device */
+static int
+zuc_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+zuc_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+static int
+zuc_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+
+/** Get device statistics */
+static void
+zuc_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct zuc_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->qp_stats.enqueued_count;
+ stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+zuc_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct zuc_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ }
+}
+
+
+/** Get device info */
+static void
+zuc_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct zuc_private *internals = dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->dev_type = dev->dev_type;
+ dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
+ dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = zuc_pmd_capabilities;
+ }
+}
+
+/** Release queue pair */
+static int
+zuc_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ if (dev->data->queue_pairs[qp_id] != NULL) {
+ rte_free(dev->data->queue_pairs[qp_id]);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+ return 0;
+}
+
+/** Set a unique name for the queue pair based on the device id and queue pair id */
+static int
+zuc_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+ struct zuc_qp *qp)
+{
+ unsigned n = snprintf(qp->name, sizeof(qp->name),
+ "zuc_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+ if (n >= sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+/** Create a ring to place processed ops on */
+static struct rte_ring *
+zuc_pmd_qp_create_processed_ops_ring(struct zuc_qp *qp,
+ unsigned ring_size, int socket_id)
+{
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(qp->name);
+ if (r) {
+ if (rte_ring_get_size(r) >= ring_size) {
+ ZUC_LOG_INFO("Reusing existing ring %s"
+ " for processed packets",
+ qp->name);
+ return r;
+ }
+
+ ZUC_LOG_ERR("Unable to reuse existing ring %s"
+ " for processed packets",
+ qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+/** Setup a queue pair */
+static int
+zuc_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id)
+{
+ struct zuc_qp *qp = NULL;
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ zuc_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("ZUC PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL)
+ return -ENOMEM;
+
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ if (zuc_pmd_qp_set_unique_name(dev, qp))
+ goto qp_setup_cleanup;
+
+ qp->processed_ops = zuc_pmd_qp_create_processed_ops_ring(qp,
+ qp_conf->nb_descriptors, socket_id);
+ if (qp->processed_ops == NULL)
+ goto qp_setup_cleanup;
+
+ qp->sess_mp = dev->data->session_pool;
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+
+ return 0;
+
+qp_setup_cleanup:
+ if (qp)
+ rte_free(qp);
+
+ return -1;
+}
+
+/** Start queue pair */
+static int
+zuc_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ return -ENOTSUP;
+}
+
+/** Stop queue pair */
+static int
+zuc_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ return -ENOTSUP;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+zuc_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the ZUC session structure */
+static unsigned
+zuc_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct zuc_session);
+}
+
+/** Configure a ZUC session from a crypto xform chain */
+static void *
+zuc_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_sym_xform *xform, void *sess)
+{
+ if (unlikely(sess == NULL)) {
+ ZUC_LOG_ERR("invalid session struct");
+ return NULL;
+ }
+
+ if (zuc_set_session_parameters(sess, xform) != 0) {
+ ZUC_LOG_ERR("failed configure session parameters");
+ return NULL;
+ }
+
+ return sess;
+}
+
+/** Clear the session memory so it doesn't leave key material behind */
+static void
+zuc_pmd_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
+{
+ /*
+ * Currently this resets the whole data structure; it needs to be
+ * investigated whether a more selective reset of the key material
+ * would be more performant.
+ */
+ if (sess)
+ memset(sess, 0, sizeof(struct zuc_session));
+}
+
+struct rte_cryptodev_ops zuc_pmd_ops = {
+ .dev_configure = zuc_pmd_config,
+ .dev_start = zuc_pmd_start,
+ .dev_stop = zuc_pmd_stop,
+ .dev_close = zuc_pmd_close,
+
+ .stats_get = zuc_pmd_stats_get,
+ .stats_reset = zuc_pmd_stats_reset,
+
+ .dev_infos_get = zuc_pmd_info_get,
+
+ .queue_pair_setup = zuc_pmd_qp_setup,
+ .queue_pair_release = zuc_pmd_qp_release,
+ .queue_pair_start = zuc_pmd_qp_start,
+ .queue_pair_stop = zuc_pmd_qp_stop,
+ .queue_pair_count = zuc_pmd_qp_count,
+
+ .session_get_size = zuc_pmd_session_get_size,
+ .session_configure = zuc_pmd_session_configure,
+ .session_clear = zuc_pmd_session_clear
+};
+
+struct rte_cryptodev_ops *rte_zuc_pmd_ops = &zuc_pmd_ops;
diff --git a/src/seastar/dpdk/drivers/crypto/zuc/rte_zuc_pmd_private.h b/src/seastar/dpdk/drivers/crypto/zuc/rte_zuc_pmd_private.h
new file mode 100644
index 00000000..030f120b
--- /dev/null
+++ b/src/seastar/dpdk/drivers/crypto/zuc/rte_zuc_pmd_private.h
@@ -0,0 +1,108 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_ZUC_PMD_PRIVATE_H_
+#define _RTE_ZUC_PMD_PRIVATE_H_
+
+#include <sso_zuc.h>
+
+#define ZUC_LOG_ERR(fmt, args...) \
+ RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_ZUC_PMD), \
+ __func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_ZUC_DEBUG
+#define ZUC_LOG_INFO(fmt, args...) \
+ RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_ZUC_PMD), \
+ __func__, __LINE__, ## args)
+
+#define ZUC_LOG_DBG(fmt, args...) \
+ RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_ZUC_PMD), \
+ __func__, __LINE__, ## args)
+#else
+#define ZUC_LOG_INFO(fmt, args...)
+#define ZUC_LOG_DBG(fmt, args...)
+#endif
+
+#define ZUC_IV_KEY_LENGTH 16
+/** private data structure for each virtual ZUC device */
+struct zuc_private {
+ unsigned max_nb_queue_pairs;
+ /**< Max number of queue pairs supported by device */
+ unsigned max_nb_sessions;
+ /**< Max number of sessions supported by device */
+};
+
+/** ZUC buffer queue pair */
+struct zuc_qp {
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_CRYPTODEV_NAME_LEN];
+ /**< Unique Queue Pair Name */
+ struct rte_ring *processed_ops;
+ /**< Ring for placing processed ops */
+ struct rte_mempool *sess_mp;
+ /**< Session Mempool */
+ struct rte_cryptodev_stats qp_stats;
+ /**< Queue pair statistics */
+} __rte_cache_aligned;
+
+enum zuc_operation {
+ ZUC_OP_ONLY_CIPHER,
+ ZUC_OP_ONLY_AUTH,
+ ZUC_OP_CIPHER_AUTH,
+ ZUC_OP_AUTH_CIPHER,
+ ZUC_OP_NOT_SUPPORTED
+};
+
+/** ZUC private session structure */
+struct zuc_session {
+ enum zuc_operation op;
+ enum rte_crypto_auth_operation auth_op;
+ uint8_t pKey_cipher[ZUC_IV_KEY_LENGTH];
+ uint8_t pKey_hash[ZUC_IV_KEY_LENGTH];
+} __rte_cache_aligned;
+
+
+extern int
+zuc_set_session_parameters(struct zuc_session *sess,
+ const struct rte_crypto_sym_xform *xform);
+
+
+/** device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_zuc_pmd_ops;
+
+
+
+#endif /* _RTE_ZUC_PMD_PRIVATE_H_ */