author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-21 11:54:28 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-21 11:54:28 +0000
commit     e6918187568dbd01842d8d1d2c808ce16a894239 (patch)
tree       64f88b554b444a49f656b6c656111a145cbbaa28 /src/spdk/dpdk/drivers/crypto/octeontx2
parent     Initial commit. (diff)
Adding upstream version 18.2.2. (upstream/18.2.2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/spdk/dpdk/drivers/crypto/octeontx2')
-rw-r--r--  src/spdk/dpdk/drivers/crypto/octeontx2/Makefile | 48
-rw-r--r--  src/spdk/dpdk/drivers/crypto/octeontx2/meson.build | 37
-rw-r--r--  src/spdk/dpdk/drivers/crypto/octeontx2/otx2_cryptodev.c | 162
-rw-r--r--  src/spdk/dpdk/drivers/crypto/octeontx2/otx2_cryptodev.h | 43
-rw-r--r--  src/spdk/dpdk/drivers/crypto/octeontx2/otx2_cryptodev_capabilities.c | 660
-rw-r--r--  src/spdk/dpdk/drivers/crypto/octeontx2/otx2_cryptodev_capabilities.h | 16
-rw-r--r--  src/spdk/dpdk/drivers/crypto/octeontx2/otx2_cryptodev_hw_access.c | 225
-rw-r--r--  src/spdk/dpdk/drivers/crypto/octeontx2/otx2_cryptodev_hw_access.h | 149
-rw-r--r--  src/spdk/dpdk/drivers/crypto/octeontx2/otx2_cryptodev_mbox.c | 229
-rw-r--r--  src/spdk/dpdk/drivers/crypto/octeontx2/otx2_cryptodev_mbox.h | 32
-rw-r--r--  src/spdk/dpdk/drivers/crypto/octeontx2/otx2_cryptodev_ops.c | 1251
-rw-r--r--  src/spdk/dpdk/drivers/crypto/octeontx2/otx2_cryptodev_ops.h | 21
-rw-r--r--  src/spdk/dpdk/drivers/crypto/octeontx2/otx2_cryptodev_qp.h | 35
-rw-r--r--  src/spdk/dpdk/drivers/crypto/octeontx2/otx2_ipsec_fp.h | 350
-rw-r--r--  src/spdk/dpdk/drivers/crypto/octeontx2/rte_pmd_octeontx2_crypto_version.map | 3
15 files changed, 3261 insertions(+), 0 deletions(-)
diff --git a/src/spdk/dpdk/drivers/crypto/octeontx2/Makefile b/src/spdk/dpdk/drivers/crypto/octeontx2/Makefile
new file mode 100644
index 000000000..5f9a6a0e3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/octeontx2/Makefile
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (C) 2019 Marvell International Ltd.
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_octeontx2_crypto.a
+
+# build flags
+CFLAGS += $(WERROR_FLAGS)
+
+LDLIBS += -lrte_eal -lrte_ethdev -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev -lrte_security
+LDLIBS += -lrte_pci -lrte_bus_pci
+LDLIBS += -lrte_common_cpt -lrte_common_octeontx2
+
+VPATH += $(RTE_SDK)/drivers/crypto/octeontx2
+
+CFLAGS += -O3
+CFLAGS += -I$(RTE_SDK)/drivers/common/cpt
+CFLAGS += -I$(RTE_SDK)/drivers/common/octeontx2
+CFLAGS += -I$(RTE_SDK)/drivers/crypto/octeontx2
+CFLAGS += -I$(RTE_SDK)/drivers/mempool/octeontx2
+CFLAGS += -I$(RTE_SDK)/drivers/net/octeontx2
+
+ifneq ($(CONFIG_RTE_ARCH_64),y)
+CFLAGS += -Wno-int-to-pointer-cast
+CFLAGS += -Wno-pointer-to-int-cast
+ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
+CFLAGS += -diag-disable 2259
+endif
+endif
+
+# PMD code
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_CRYPTO) += otx2_cryptodev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_CRYPTO) += otx2_cryptodev_capabilities.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_CRYPTO) += otx2_cryptodev_hw_access.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_CRYPTO) += otx2_cryptodev_mbox.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_CRYPTO) += otx2_cryptodev_ops.c
+
+# export include files
+SYMLINK-y-include +=
+
+# versioning export map
+EXPORT_MAP := rte_pmd_octeontx2_crypto_version.map
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/crypto/octeontx2/meson.build b/src/spdk/dpdk/drivers/crypto/octeontx2/meson.build
new file mode 100644
index 000000000..a28c700b9
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/octeontx2/meson.build
@@ -0,0 +1,37 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (C) 2019 Marvell International Ltd.
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+
+deps += ['bus_pci']
+deps += ['common_cpt']
+deps += ['common_octeontx2']
+deps += ['ethdev']
+deps += ['security']
+name = 'octeontx2_crypto'
+
+sources = files('otx2_cryptodev.c',
+ 'otx2_cryptodev_capabilities.c',
+ 'otx2_cryptodev_hw_access.c',
+ 'otx2_cryptodev_mbox.c',
+ 'otx2_cryptodev_ops.c')
+
+extra_flags = []
+# This integrated controller runs only on arm64 machines; remove the 32-bit warnings
+if not dpdk_conf.get('RTE_ARCH_64')
+ extra_flags += ['-Wno-int-to-pointer-cast', '-Wno-pointer-to-int-cast']
+endif
+
+foreach flag: extra_flags
+ if cc.has_argument(flag)
+ cflags += flag
+ endif
+endforeach
+
+includes += include_directories('../../common/cpt')
+includes += include_directories('../../common/octeontx2')
+includes += include_directories('../../crypto/octeontx2')
+includes += include_directories('../../mempool/octeontx2')
+includes += include_directories('../../net/octeontx2')
diff --git a/src/spdk/dpdk/drivers/crypto/octeontx2/otx2_cryptodev.c b/src/spdk/dpdk/drivers/crypto/octeontx2/otx2_cryptodev.c
new file mode 100644
index 000000000..6ffbc2eb7
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/octeontx2/otx2_cryptodev.c
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) 2019 Marvell International Ltd.
+ */
+
+#include <rte_bus_pci.h>
+#include <rte_common.h>
+#include <rte_crypto.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_dev.h>
+#include <rte_errno.h>
+#include <rte_mempool.h>
+#include <rte_pci.h>
+
+#include "otx2_common.h"
+#include "otx2_cryptodev.h"
+#include "otx2_cryptodev_mbox.h"
+#include "otx2_cryptodev_ops.h"
+#include "otx2_dev.h"
+
+/* CPT common headers */
+#include "cpt_common.h"
+#include "cpt_pmd_logs.h"
+
+int otx2_cpt_logtype;
+
+uint8_t otx2_cryptodev_driver_id;
+
+static struct rte_pci_id pci_id_cpt_table[] = {
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_RVU_CPT_VF)
+ },
+ /* sentinel */
+ {
+ .device_id = 0
+ },
+};
+
+static int
+otx2_cpt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ struct rte_cryptodev_pmd_init_params init_params = {
+ .name = "",
+ .socket_id = rte_socket_id(),
+ .private_data_size = sizeof(struct otx2_cpt_vf)
+ };
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ struct rte_cryptodev *dev;
+ struct otx2_dev *otx2_dev;
+ struct otx2_cpt_vf *vf;
+ uint16_t nb_queues;
+ int ret;
+
+ rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
+
+ dev = rte_cryptodev_pmd_create(name, &pci_dev->device, &init_params);
+ if (dev == NULL) {
+ ret = -ENODEV;
+ goto exit;
+ }
+
+ dev->dev_ops = &otx2_cpt_ops;
+
+ dev->driver_id = otx2_cryptodev_driver_id;
+
+ /* Get private data space allocated */
+ vf = dev->data->dev_private;
+
+ otx2_dev = &vf->otx2_dev;
+
+ /* Initialize the base otx2_dev object */
+ ret = otx2_dev_init(pci_dev, otx2_dev);
+ if (ret) {
+ CPT_LOG_ERR("Could not initialize otx2_dev");
+ goto pmd_destroy;
+ }
+
+ /* Get number of queues available on the device */
+ ret = otx2_cpt_available_queues_get(dev, &nb_queues);
+ if (ret) {
+ CPT_LOG_ERR("Could not determine the number of queues available");
+ goto otx2_dev_fini;
+ }
+
+ /* Don't exceed the limits set per VF */
+ nb_queues = RTE_MIN(nb_queues, OTX2_CPT_MAX_QUEUES_PER_VF);
+
+ if (nb_queues == 0) {
+ CPT_LOG_ERR("No free queues available on the device");
+ goto otx2_dev_fini;
+ }
+
+ vf->max_queues = nb_queues;
+
+ CPT_LOG_INFO("Max queues supported by device: %d", vf->max_queues);
+
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_HW_ACCELERATED |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_IN_PLACE_SGL |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
+ RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT |
+ RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA |
+ RTE_CRYPTODEV_FF_SYM_SESSIONLESS;
+
+ return 0;
+
+otx2_dev_fini:
+ otx2_dev_fini(pci_dev, otx2_dev);
+pmd_destroy:
+ rte_cryptodev_pmd_destroy(dev);
+exit:
+ CPT_LOG_ERR("Could not create device (vendor_id: 0x%x device_id: 0x%x)",
+ pci_dev->id.vendor_id, pci_dev->id.device_id);
+ return ret;
+}
+
+static int
+otx2_cpt_pci_remove(struct rte_pci_device *pci_dev)
+{
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ struct rte_cryptodev *dev;
+
+ if (pci_dev == NULL)
+ return -EINVAL;
+
+ rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
+
+ dev = rte_cryptodev_pmd_get_named_dev(name);
+ if (dev == NULL)
+ return -ENODEV;
+
+ return rte_cryptodev_pmd_destroy(dev);
+}
+
+static struct rte_pci_driver otx2_cryptodev_pmd = {
+ .id_table = pci_id_cpt_table,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = otx2_cpt_pci_probe,
+ .remove = otx2_cpt_pci_remove,
+};
+
+static struct cryptodev_driver otx2_cryptodev_drv;
+
+RTE_INIT(otx2_cpt_init_log);
+RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_OCTEONTX2_PMD, otx2_cryptodev_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(CRYPTODEV_NAME_OCTEONTX2_PMD, pci_id_cpt_table);
+RTE_PMD_REGISTER_KMOD_DEP(CRYPTODEV_NAME_OCTEONTX2_PMD, "vfio-pci");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(otx2_cryptodev_drv, otx2_cryptodev_pmd.driver,
+ otx2_cryptodev_driver_id);
+
+RTE_INIT(otx2_cpt_init_log)
+{
+ /* Bus level logs */
+ otx2_cpt_logtype = rte_log_register("pmd.crypto.octeontx2");
+ if (otx2_cpt_logtype >= 0)
+ rte_log_set_level(otx2_cpt_logtype, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/crypto/octeontx2/otx2_cryptodev.h b/src/spdk/dpdk/drivers/crypto/octeontx2/otx2_cryptodev.h
new file mode 100644
index 000000000..c0aa661b3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/octeontx2/otx2_cryptodev.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) 2019 Marvell International Ltd.
+ */
+
+#ifndef _OTX2_CRYPTODEV_H_
+#define _OTX2_CRYPTODEV_H_
+
+#include "cpt_common.h"
+
+#include "otx2_dev.h"
+
+/* Marvell OCTEON TX2 Crypto PMD device name */
+#define CRYPTODEV_NAME_OCTEONTX2_PMD crypto_octeontx2
+
+#define OTX2_CPT_MAX_LFS 64
+#define OTX2_CPT_MAX_QUEUES_PER_VF 64
+
+/**
+ * Device private data
+ */
+struct otx2_cpt_vf {
+ struct otx2_dev otx2_dev;
+ /**< Base class */
+ uint16_t max_queues;
+ /**< Max queues supported */
+ uint8_t nb_queues;
+ /**< Number of crypto queues attached */
+ uint16_t lf_msixoff[OTX2_CPT_MAX_LFS];
+ /**< MSI-X offsets */
+ uint8_t err_intr_registered:1;
+ /**< Are error interrupts registered? */
+};
+
+#define CPT_LOGTYPE otx2_cpt_logtype
+
+extern int otx2_cpt_logtype;
+
+/*
+ * Crypto device driver ID
+ */
+extern uint8_t otx2_cryptodev_driver_id;
+
+#endif /* _OTX2_CRYPTODEV_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/octeontx2/otx2_cryptodev_capabilities.c b/src/spdk/dpdk/drivers/crypto/octeontx2/otx2_cryptodev_capabilities.c
new file mode 100644
index 000000000..3eb3d8532
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/octeontx2/otx2_cryptodev_capabilities.c
@@ -0,0 +1,660 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) 2019 Marvell International Ltd.
+ */
+
+#include <rte_cryptodev.h>
+
+#include "otx2_cryptodev_capabilities.h"
+
+static const struct
+rte_cryptodev_capabilities otx2_cpt_capabilities[] = {
+ /* Symmetric capabilities */
+ { /* NULL (AUTH) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_NULL,
+ .block_size = 1,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ }, },
+ }, },
+ },
+ { /* AES GMAC (AUTH) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_AES_GMAC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 8,
+ .max = 16,
+ .increment = 4
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* KASUMI (F9) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_KASUMI_F9,
+ .block_size = 8,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 4,
+ .max = 4,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* MD5 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_MD5,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* MD5 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 8,
+ .max = 64,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA1 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 20,
+ .max = 20,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA1 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 1024,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 20,
+ .max = 20,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA224 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA224,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 28,
+ .max = 28,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA224 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 1024,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 28,
+ .max = 28,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA256 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 32,
+ .max = 32,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA256 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 1024,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 32,
+ .max = 32,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA384 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA384,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 48,
+ .max = 48,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA384 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 1024,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 48,
+ .max = 48,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA512 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA512,
+ .block_size = 128,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA512 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 1,
+ .max = 1024,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SNOW 3G (UIA2) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 4,
+ .max = 4,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* ZUC (EIA3) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_ZUC_EIA3,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 4,
+ .max = 4,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* NULL (CIPHER) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_NULL,
+ .block_size = 1,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ }
+ }, },
+ }, }
+ },
+ { /* 3DES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+ .block_size = 8,
+ .key_size = {
+ .min = 24,
+ .max = 24,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 16,
+ .increment = 8
+ }
+ }, }
+ }, }
+ },
+ { /* 3DES ECB */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_ECB,
+ .block_size = 8,
+ .key_size = {
+ .min = 24,
+ .max = 24,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES CTR */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 16,
+ .increment = 4
+ }
+ }, }
+ }, }
+ },
+ { /* AES XTS */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_XTS,
+ .block_size = 16,
+ .key_size = {
+ .min = 32,
+ .max = 64,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* DES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_DES_CBC,
+ .block_size = 8,
+ .key_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* KASUMI (F8) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_KASUMI_F8,
+ .block_size = 8,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* SNOW 3G (UEA2) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* ZUC (EEA3) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_ZUC_EEA3,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES GCM */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+ {.aead = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 4,
+ .max = 16,
+ .increment = 1
+ },
+ .aad_size = {
+ .min = 0,
+ .max = 1024,
+ .increment = 1
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ /* End of symmetric capabilities */
+
+ /* Asymmetric capabilities */
+ { /* RSA */
+ .op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
+ {.asym = {
+ .xform_capa = {
+ .xform_type = RTE_CRYPTO_ASYM_XFORM_RSA,
+ .op_types = ((1 << RTE_CRYPTO_ASYM_OP_SIGN) |
+ (1 << RTE_CRYPTO_ASYM_OP_VERIFY) |
+ (1 << RTE_CRYPTO_ASYM_OP_ENCRYPT) |
+ (1 << RTE_CRYPTO_ASYM_OP_DECRYPT)),
+ {.modlen = {
+ .min = 17,
+ .max = 1024,
+ .increment = 1
+ }, }
+ }
+ }, }
+ },
+ { /* MOD_EXP */
+ .op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
+ {.asym = {
+ .xform_capa = {
+ .xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX,
+ .op_types = 0,
+ {.modlen = {
+ .min = 17,
+ .max = 1024,
+ .increment = 1
+ }, }
+ }
+ }, }
+ },
+ { /* ECDSA */
+ .op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
+ {.asym = {
+ .xform_capa = {
+ .xform_type = RTE_CRYPTO_ASYM_XFORM_ECDSA,
+ .op_types = ((1 << RTE_CRYPTO_ASYM_OP_SIGN) |
+ (1 << RTE_CRYPTO_ASYM_OP_VERIFY)),
+ }
+ },
+ }
+ },
+ { /* ECPM */
+ .op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
+ {.asym = {
+ .xform_capa = {
+ .xform_type = RTE_CRYPTO_ASYM_XFORM_ECPM,
+ .op_types = 0
+ }
+ },
+ }
+ },
+ /* End of asymmetric capabilities */
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+const struct rte_cryptodev_capabilities *
+otx2_cpt_capabilities_get(void)
+{
+ return otx2_cpt_capabilities;
+}
diff --git a/src/spdk/dpdk/drivers/crypto/octeontx2/otx2_cryptodev_capabilities.h b/src/spdk/dpdk/drivers/crypto/octeontx2/otx2_cryptodev_capabilities.h
new file mode 100644
index 000000000..f103c32ed
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/octeontx2/otx2_cryptodev_capabilities.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) 2019 Marvell International Ltd.
+ */
+
+#ifndef _OTX2_CRYPTODEV_CAPABILITIES_H_
+#define _OTX2_CRYPTODEV_CAPABILITIES_H_
+
+#include <rte_cryptodev.h>
+
+/*
+ * Get the list of capabilities supported by the device.
+ */
+const struct rte_cryptodev_capabilities *otx2_cpt_capabilities_get(void);
+
+#endif /* _OTX2_CRYPTODEV_CAPABILITIES_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/octeontx2/otx2_cryptodev_hw_access.c b/src/spdk/dpdk/drivers/crypto/octeontx2/otx2_cryptodev_hw_access.c
new file mode 100644
index 000000000..9e4f78273
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/octeontx2/otx2_cryptodev_hw_access.c
@@ -0,0 +1,225 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) 2019 Marvell International Ltd.
+ */
+#include <rte_cryptodev.h>
+
+#include "otx2_common.h"
+#include "otx2_cryptodev.h"
+#include "otx2_cryptodev_hw_access.h"
+#include "otx2_cryptodev_mbox.h"
+#include "otx2_cryptodev_ops.h"
+#include "otx2_dev.h"
+
+#include "cpt_pmd_logs.h"
+
+static void
+otx2_cpt_lf_err_intr_handler(void *param)
+{
+ uintptr_t base = (uintptr_t)param;
+ uint8_t lf_id;
+ uint64_t intr;
+
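+	/* OTX2_CPT_LF_BAR2() encodes the queue id at bit 12 of the LF's
+	 * register base, so it can be recovered from the address here.
+	 */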
+ lf_id = (base >> 12) & 0xFF;
+
+ intr = otx2_read64(base + OTX2_CPT_LF_MISC_INT);
+ if (intr == 0)
+ return;
+
+ CPT_LOG_ERR("LF %d MISC_INT: 0x%" PRIx64 "", lf_id, intr);
+
+ /* Clear interrupt */
+ otx2_write64(intr, base + OTX2_CPT_LF_MISC_INT);
+}
+
+static void
+otx2_cpt_lf_err_intr_unregister(const struct rte_cryptodev *dev,
+ uint16_t msix_off, uintptr_t base)
+{
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
+ struct rte_intr_handle *handle = &pci_dev->intr_handle;
+
+ /* Disable error interrupts */
+ otx2_write64(~0ull, base + OTX2_CPT_LF_MISC_INT_ENA_W1C);
+
+ otx2_unregister_irq(handle, otx2_cpt_lf_err_intr_handler, (void *)base,
+ msix_off);
+}
+
+void
+otx2_cpt_err_intr_unregister(const struct rte_cryptodev *dev)
+{
+ struct otx2_cpt_vf *vf = dev->data->dev_private;
+ uintptr_t base;
+ uint32_t i;
+
+ for (i = 0; i < vf->nb_queues; i++) {
+ base = OTX2_CPT_LF_BAR2(vf, i);
+ otx2_cpt_lf_err_intr_unregister(dev, vf->lf_msixoff[i], base);
+ }
+
+ vf->err_intr_registered = 0;
+}
+
+static int
+otx2_cpt_lf_err_intr_register(const struct rte_cryptodev *dev,
+ uint16_t msix_off, uintptr_t base)
+{
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
+ struct rte_intr_handle *handle = &pci_dev->intr_handle;
+ int ret;
+
+ /* Disable error interrupts */
+ otx2_write64(~0ull, base + OTX2_CPT_LF_MISC_INT_ENA_W1C);
+
+ /* Register error interrupt handler */
+ ret = otx2_register_irq(handle, otx2_cpt_lf_err_intr_handler,
+ (void *)base, msix_off);
+ if (ret)
+ return ret;
+
+ /* Enable error interrupts */
+ otx2_write64(~0ull, base + OTX2_CPT_LF_MISC_INT_ENA_W1S);
+
+ return 0;
+}
+
+int
+otx2_cpt_err_intr_register(const struct rte_cryptodev *dev)
+{
+ struct otx2_cpt_vf *vf = dev->data->dev_private;
+ uint32_t i, j, ret;
+ uintptr_t base;
+
+ for (i = 0; i < vf->nb_queues; i++) {
+ if (vf->lf_msixoff[i] == MSIX_VECTOR_INVALID) {
+ CPT_LOG_ERR("Invalid CPT LF MSI-X offset: 0x%x",
+ vf->lf_msixoff[i]);
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < vf->nb_queues; i++) {
+ base = OTX2_CPT_LF_BAR2(vf, i);
+ ret = otx2_cpt_lf_err_intr_register(dev, vf->lf_msixoff[i],
+ base);
+ if (ret)
+ goto intr_unregister;
+ }
+
+ vf->err_intr_registered = 1;
+ return 0;
+
+intr_unregister:
+ /* Unregister the ones already registered */
+ for (j = 0; j < i; j++) {
+ base = OTX2_CPT_LF_BAR2(vf, j);
+ otx2_cpt_lf_err_intr_unregister(dev, vf->lf_msixoff[j], base);
+ }
+
+	/*
+	 * Failed to register error interrupts. Not returning an error, as
+	 * that would prevent the application from enabling a larger number
+	 * of devs.
+	 *
+	 * This failure is a known issue: otx2_dev_init() initializes
+	 * interrupts based on static values from ATF, while the actual
+	 * number of interrupts needed (which depends on the LFs) can be
+	 * determined only after otx2_dev_init() has already set up the
+	 * interrupts, including the mbox interrupts.
+	 */
+ return 0;
+}
+
+int
+otx2_cpt_iq_enable(const struct rte_cryptodev *dev,
+ const struct otx2_cpt_qp *qp, uint8_t grp_mask, uint8_t pri,
+ uint32_t size_div40)
+{
+ union otx2_cpt_af_lf_ctl af_lf_ctl;
+ union otx2_cpt_lf_inprog inprog;
+ union otx2_cpt_lf_q_base base;
+ union otx2_cpt_lf_q_size size;
+ union otx2_cpt_lf_ctl lf_ctl;
+ int ret;
+
+ /* Set engine group mask and priority */
+
+ ret = otx2_cpt_af_reg_read(dev, OTX2_CPT_AF_LF_CTL(qp->id),
+ &af_lf_ctl.u);
+ if (ret)
+ return ret;
+ af_lf_ctl.s.grp = grp_mask;
+ af_lf_ctl.s.pri = pri ? 1 : 0;
+ ret = otx2_cpt_af_reg_write(dev, OTX2_CPT_AF_LF_CTL(qp->id),
+ af_lf_ctl.u);
+ if (ret)
+ return ret;
+
+ /* Set instruction queue base address */
+
+ base.u = otx2_read64(qp->base + OTX2_CPT_LF_Q_BASE);
+ base.s.fault = 0;
+ base.s.stopped = 0;
+ base.s.addr = qp->iq_dma_addr >> 7;
+ otx2_write64(base.u, qp->base + OTX2_CPT_LF_Q_BASE);
+
+ /* Set instruction queue size */
+
+ size.u = otx2_read64(qp->base + OTX2_CPT_LF_Q_SIZE);
+ size.s.size_div40 = size_div40;
+ otx2_write64(size.u, qp->base + OTX2_CPT_LF_Q_SIZE);
+
+ /* Enable instruction queue */
+
+ lf_ctl.u = otx2_read64(qp->base + OTX2_CPT_LF_CTL);
+ lf_ctl.s.ena = 1;
+ otx2_write64(lf_ctl.u, qp->base + OTX2_CPT_LF_CTL);
+
+ /* Start instruction execution */
+
+ inprog.u = otx2_read64(qp->base + OTX2_CPT_LF_INPROG);
+ inprog.s.eena = 1;
+ otx2_write64(inprog.u, qp->base + OTX2_CPT_LF_INPROG);
+
+ return 0;
+}
+
+void
+otx2_cpt_iq_disable(struct otx2_cpt_qp *qp)
+{
+ union otx2_cpt_lf_q_grp_ptr grp_ptr;
+ union otx2_cpt_lf_inprog inprog;
+ union otx2_cpt_lf_ctl ctl;
+ int cnt;
+
+ /* Stop instruction execution */
+ inprog.u = otx2_read64(qp->base + OTX2_CPT_LF_INPROG);
+ inprog.s.eena = 0x0;
+ otx2_write64(inprog.u, qp->base + OTX2_CPT_LF_INPROG);
+
+ /* Disable instructions enqueuing */
+ ctl.u = otx2_read64(qp->base + OTX2_CPT_LF_CTL);
+ ctl.s.ena = 0;
+ otx2_write64(ctl.u, qp->base + OTX2_CPT_LF_CTL);
+
+ /* Wait for instruction queue to become empty */
+ cnt = 0;
+ do {
+ inprog.u = otx2_read64(qp->base + OTX2_CPT_LF_INPROG);
+ if (inprog.s.grb_partial)
+ cnt = 0;
+ else
+ cnt++;
+ grp_ptr.u = otx2_read64(qp->base + OTX2_CPT_LF_Q_GRP_PTR);
+ } while ((cnt < 10) && (grp_ptr.s.nq_ptr != grp_ptr.s.dq_ptr));
+
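+	/* Now wait for the in-flight count and buffer counters to settle */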
+ cnt = 0;
+ do {
+ inprog.u = otx2_read64(qp->base + OTX2_CPT_LF_INPROG);
+ if ((inprog.s.inflight == 0) &&
+ (inprog.s.gwb_cnt < 40) &&
+ ((inprog.s.grb_cnt == 0) || (inprog.s.grb_cnt == 40)))
+ cnt++;
+ else
+ cnt = 0;
+ } while (cnt < 10);
+}
diff --git a/src/spdk/dpdk/drivers/crypto/octeontx2/otx2_cryptodev_hw_access.h b/src/spdk/dpdk/drivers/crypto/octeontx2/otx2_cryptodev_hw_access.h
new file mode 100644
index 000000000..43db6a642
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/octeontx2/otx2_cryptodev_hw_access.h
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) 2019 Marvell International Ltd.
+ */
+
+#ifndef _OTX2_CRYPTODEV_HW_ACCESS_H_
+#define _OTX2_CRYPTODEV_HW_ACCESS_H_
+
+#include <stdint.h>
+
+#include <rte_cryptodev.h>
+#include <rte_memory.h>
+
+#include "cpt_common.h"
+#include "cpt_hw_types.h"
+#include "cpt_mcode_defines.h"
+
+#include "otx2_dev.h"
+#include "otx2_cryptodev_qp.h"
+
+/* CPT instruction queue length */
+#define OTX2_CPT_IQ_LEN 8200
+
+#define OTX2_CPT_DEFAULT_CMD_QLEN OTX2_CPT_IQ_LEN
+
+/* Mask which selects all engine groups */
+#define OTX2_CPT_ENG_GRPS_MASK 0xFF
+
+/* Register offsets */
+
+/* LMT LF registers */
+#define OTX2_LMT_LF_LMTLINE(a) (0x0ull | (uint64_t)(a) << 3)
+
+/* CPT LF registers */
+#define OTX2_CPT_LF_CTL 0x10ull
+#define OTX2_CPT_LF_INPROG 0x40ull
+#define OTX2_CPT_LF_MISC_INT 0xb0ull
+#define OTX2_CPT_LF_MISC_INT_ENA_W1S 0xd0ull
+#define OTX2_CPT_LF_MISC_INT_ENA_W1C 0xe0ull
+#define OTX2_CPT_LF_Q_BASE 0xf0ull
+#define OTX2_CPT_LF_Q_SIZE 0x100ull
+#define OTX2_CPT_LF_Q_GRP_PTR 0x120ull
+#define OTX2_CPT_LF_NQ(a) (0x400ull | (uint64_t)(a) << 3)
+
+#define OTX2_CPT_AF_LF_CTL(a) (0x27000ull | (uint64_t)(a) << 3)
+
+#define OTX2_CPT_LF_BAR2(vf, q_id) \
+ ((vf)->otx2_dev.bar2 + \
+ ((RVU_BLOCK_ADDR_CPT0 << 20) | ((q_id) << 12)))
+
+#define OTX2_CPT_QUEUE_HI_PRIO 0x1
+
+union otx2_cpt_lf_ctl {
+ uint64_t u;
+ struct {
+ uint64_t ena : 1;
+ uint64_t fc_ena : 1;
+ uint64_t fc_up_crossing : 1;
+ uint64_t reserved_3_3 : 1;
+ uint64_t fc_hyst_bits : 4;
+ uint64_t reserved_8_63 : 56;
+ } s;
+};
+
+union otx2_cpt_lf_inprog {
+ uint64_t u;
+ struct {
+ uint64_t inflight : 9;
+ uint64_t reserved_9_15 : 7;
+ uint64_t eena : 1;
+ uint64_t grp_drp : 1;
+ uint64_t reserved_18_30 : 13;
+ uint64_t grb_partial : 1;
+ uint64_t grb_cnt : 8;
+ uint64_t gwb_cnt : 8;
+ uint64_t reserved_48_63 : 16;
+ } s;
+};
+
+union otx2_cpt_lf_q_base {
+ uint64_t u;
+ struct {
+ uint64_t fault : 1;
+ uint64_t stopped : 1;
+ uint64_t reserved_2_6 : 5;
+ uint64_t addr : 46;
+ uint64_t reserved_53_63 : 11;
+ } s;
+};
+
+union otx2_cpt_lf_q_size {
+ uint64_t u;
+ struct {
+ uint64_t size_div40 : 15;
+ uint64_t reserved_15_63 : 49;
+ } s;
+};
+
+union otx2_cpt_af_lf_ctl {
+ uint64_t u;
+ struct {
+ uint64_t pri : 1;
+ uint64_t reserved_1_8 : 8;
+ uint64_t pf_func_inst : 1;
+ uint64_t cont_err : 1;
+ uint64_t reserved_11_15 : 5;
+ uint64_t nixtx_en : 1;
+ uint64_t reserved_17_47 : 31;
+ uint64_t grp : 8;
+ uint64_t reserved_56_63 : 8;
+ } s;
+};
+
+union otx2_cpt_lf_q_grp_ptr {
+ uint64_t u;
+ struct {
+ uint64_t dq_ptr : 15;
+ uint64_t reserved_31_15 : 17;
+ uint64_t nq_ptr : 15;
+ uint64_t reserved_47_62 : 16;
+ uint64_t xq_xor : 1;
+ } s;
+};
+
+/*
+ * Enumeration cpt_9x_comp_e
+ *
+ * CPT 9X Completion Enumeration
+ * Enumerates the values of CPT_RES_S[COMPCODE].
+ */
+enum cpt_9x_comp_e {
+ CPT_9X_COMP_E_NOTDONE = 0x00,
+ CPT_9X_COMP_E_GOOD = 0x01,
+ CPT_9X_COMP_E_FAULT = 0x02,
+ CPT_9X_COMP_E_HWERR = 0x04,
+ CPT_9X_COMP_E_INSTERR = 0x05,
+ CPT_9X_COMP_E_LAST_ENTRY = 0x06
+};
+
+void otx2_cpt_err_intr_unregister(const struct rte_cryptodev *dev);
+
+int otx2_cpt_err_intr_register(const struct rte_cryptodev *dev);
+
+int otx2_cpt_iq_enable(const struct rte_cryptodev *dev,
+ const struct otx2_cpt_qp *qp, uint8_t grp_mask,
+ uint8_t pri, uint32_t size_div40);
+
+void otx2_cpt_iq_disable(struct otx2_cpt_qp *qp);
+
+#endif /* _OTX2_CRYPTODEV_HW_ACCESS_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/octeontx2/otx2_cryptodev_mbox.c b/src/spdk/dpdk/drivers/crypto/octeontx2/otx2_cryptodev_mbox.c
new file mode 100644
index 000000000..6bb8316ec
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/octeontx2/otx2_cryptodev_mbox.c
@@ -0,0 +1,229 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) 2019 Marvell International Ltd.
+ */
+#include <rte_cryptodev.h>
+#include <rte_ethdev.h>
+
+#include "otx2_cryptodev.h"
+#include "otx2_cryptodev_hw_access.h"
+#include "otx2_cryptodev_mbox.h"
+#include "otx2_dev.h"
+#include "otx2_ethdev.h"
+#include "otx2_sec_idev.h"
+#include "otx2_mbox.h"
+
+#include "cpt_pmd_logs.h"
+
+int
+otx2_cpt_available_queues_get(const struct rte_cryptodev *dev,
+ uint16_t *nb_queues)
+{
+ struct otx2_cpt_vf *vf = dev->data->dev_private;
+ struct otx2_dev *otx2_dev = &vf->otx2_dev;
+ struct free_rsrcs_rsp *rsp;
+ int ret;
+
+ otx2_mbox_alloc_msg_free_rsrc_cnt(otx2_dev->mbox);
+
+ ret = otx2_mbox_process_msg(otx2_dev->mbox, (void *)&rsp);
+ if (ret)
+ return -EIO;
+
+ *nb_queues = rsp->cpt;
+ return 0;
+}
+
+int
+otx2_cpt_queues_attach(const struct rte_cryptodev *dev, uint8_t nb_queues)
+{
+ struct otx2_cpt_vf *vf = dev->data->dev_private;
+ struct otx2_mbox *mbox = vf->otx2_dev.mbox;
+ struct rsrc_attach_req *req;
+
+ /* Ask AF to attach required LFs */
+
+ req = otx2_mbox_alloc_msg_attach_resources(mbox);
+
+ /* 1 LF = 1 queue */
+ req->cptlfs = nb_queues;
+
+ if (otx2_mbox_process(mbox) < 0)
+ return -EIO;
+
+ /* Update number of attached queues */
+ vf->nb_queues = nb_queues;
+
+ return 0;
+}
+
+int
+otx2_cpt_queues_detach(const struct rte_cryptodev *dev)
+{
+ struct otx2_cpt_vf *vf = dev->data->dev_private;
+ struct otx2_mbox *mbox = vf->otx2_dev.mbox;
+ struct rsrc_detach_req *req;
+
+ req = otx2_mbox_alloc_msg_detach_resources(mbox);
+ req->cptlfs = true;
+ req->partial = true;
+ if (otx2_mbox_process(mbox) < 0)
+ return -EIO;
+
+ /* Queues have been detached */
+ vf->nb_queues = 0;
+
+ return 0;
+}
+
+int
+otx2_cpt_msix_offsets_get(const struct rte_cryptodev *dev)
+{
+ struct otx2_cpt_vf *vf = dev->data->dev_private;
+ struct otx2_mbox *mbox = vf->otx2_dev.mbox;
+ struct msix_offset_rsp *rsp;
+ uint32_t i, ret;
+
+ /* Get CPT MSI-X vector offsets */
+
+ otx2_mbox_alloc_msg_msix_offset(mbox);
+
+ ret = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < vf->nb_queues; i++)
+ vf->lf_msixoff[i] = rsp->cptlf_msixoff[i];
+
+ return 0;
+}
+
+static int
+otx2_cpt_send_mbox_msg(struct otx2_cpt_vf *vf)
+{
+ struct otx2_mbox *mbox = vf->otx2_dev.mbox;
+ int ret;
+
+ otx2_mbox_msg_send(mbox, 0);
+
+ ret = otx2_mbox_wait_for_rsp(mbox, 0);
+ if (ret < 0) {
+ CPT_LOG_ERR("Could not get mailbox response");
+ return ret;
+ }
+
+ return 0;
+}
+
+int
+otx2_cpt_af_reg_read(const struct rte_cryptodev *dev, uint64_t reg,
+ uint64_t *val)
+{
+ struct otx2_cpt_vf *vf = dev->data->dev_private;
+ struct otx2_mbox *mbox = vf->otx2_dev.mbox;
+ struct otx2_mbox_dev *mdev = &mbox->dev[0];
+ struct cpt_rd_wr_reg_msg *msg;
+ int ret, off;
+
+ msg = (struct cpt_rd_wr_reg_msg *)
+ otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*msg),
+ sizeof(*msg));
+ if (msg == NULL) {
+ CPT_LOG_ERR("Could not allocate mailbox message");
+ return -EFAULT;
+ }
+
+ msg->hdr.id = MBOX_MSG_CPT_RD_WR_REGISTER;
+ msg->hdr.sig = OTX2_MBOX_REQ_SIG;
+ msg->hdr.pcifunc = vf->otx2_dev.pf_func;
+ msg->is_write = 0;
+ msg->reg_offset = reg;
+ msg->ret_val = val;
+
+ ret = otx2_cpt_send_mbox_msg(vf);
+ if (ret < 0)
+ return ret;
+
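+	/* The reply is placed in the mbox RX region, just past the mbox header */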
+ off = mbox->rx_start +
+ RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
+ msg = (struct cpt_rd_wr_reg_msg *) ((uintptr_t)mdev->mbase + off);
+
+ *val = msg->val;
+
+ return 0;
+}
+
+int
+otx2_cpt_af_reg_write(const struct rte_cryptodev *dev, uint64_t reg,
+ uint64_t val)
+{
+ struct otx2_cpt_vf *vf = dev->data->dev_private;
+ struct otx2_mbox *mbox = vf->otx2_dev.mbox;
+ struct cpt_rd_wr_reg_msg *msg;
+
+ msg = (struct cpt_rd_wr_reg_msg *)
+ otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*msg),
+ sizeof(*msg));
+ if (msg == NULL) {
+ CPT_LOG_ERR("Could not allocate mailbox message");
+ return -EFAULT;
+ }
+
+ msg->hdr.id = MBOX_MSG_CPT_RD_WR_REGISTER;
+ msg->hdr.sig = OTX2_MBOX_REQ_SIG;
+ msg->hdr.pcifunc = vf->otx2_dev.pf_func;
+ msg->is_write = 1;
+ msg->reg_offset = reg;
+ msg->val = val;
+
+ return otx2_cpt_send_mbox_msg(vf);
+}
+
+int
+otx2_cpt_inline_init(const struct rte_cryptodev *dev)
+{
+ struct otx2_cpt_vf *vf = dev->data->dev_private;
+ struct otx2_mbox *mbox = vf->otx2_dev.mbox;
+ struct cpt_rx_inline_lf_cfg_msg *msg;
+ int ret;
+
+ msg = otx2_mbox_alloc_msg_cpt_rx_inline_lf_cfg(mbox);
+ msg->sso_pf_func = otx2_sso_pf_func_get();
+
+ otx2_mbox_msg_send(mbox, 0);
+ ret = otx2_mbox_process(mbox);
+ if (ret < 0)
+ return -EIO;
+
+ return 0;
+}
+
+int
+otx2_cpt_qp_ethdev_bind(const struct rte_cryptodev *dev, struct otx2_cpt_qp *qp,
+ uint16_t port_id)
+{
+ struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
+ struct otx2_cpt_vf *vf = dev->data->dev_private;
+ struct otx2_mbox *mbox = vf->otx2_dev.mbox;
+ struct cpt_inline_ipsec_cfg_msg *msg;
+ struct otx2_eth_dev *otx2_eth_dev;
+ int ret;
+
+ if (!otx2_eth_dev_is_sec_capable(&rte_eth_devices[port_id]))
+ return -EINVAL;
+
+ otx2_eth_dev = otx2_eth_pmd_priv(eth_dev);
+
+ msg = otx2_mbox_alloc_msg_cpt_inline_ipsec_cfg(mbox);
+ msg->dir = CPT_INLINE_OUTBOUND;
+ msg->enable = 1;
+ msg->slot = qp->id;
+
+ msg->nix_pf_func = otx2_eth_dev->pf_func;
+
+ otx2_mbox_msg_send(mbox, 0);
+ ret = otx2_mbox_process(mbox);
+ if (ret < 0)
+ return -EIO;
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/crypto/octeontx2/otx2_cryptodev_mbox.h b/src/spdk/dpdk/drivers/crypto/octeontx2/otx2_cryptodev_mbox.h
new file mode 100644
index 000000000..ae66b0846
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/octeontx2/otx2_cryptodev_mbox.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) 2019 Marvell International Ltd.
+ */
+
+#ifndef _OTX2_CRYPTODEV_MBOX_H_
+#define _OTX2_CRYPTODEV_MBOX_H_
+
+#include <rte_cryptodev.h>
+
+#include "otx2_cryptodev_hw_access.h"
+
+int otx2_cpt_available_queues_get(const struct rte_cryptodev *dev,
+ uint16_t *nb_queues);
+
+int otx2_cpt_queues_attach(const struct rte_cryptodev *dev, uint8_t nb_queues);
+
+int otx2_cpt_queues_detach(const struct rte_cryptodev *dev);
+
+int otx2_cpt_msix_offsets_get(const struct rte_cryptodev *dev);
+
+int otx2_cpt_af_reg_read(const struct rte_cryptodev *dev, uint64_t reg,
+ uint64_t *val);
+
+int otx2_cpt_af_reg_write(const struct rte_cryptodev *dev, uint64_t reg,
+ uint64_t val);
+
+int otx2_cpt_qp_ethdev_bind(const struct rte_cryptodev *dev,
+ struct otx2_cpt_qp *qp, uint16_t port_id);
+
+int otx2_cpt_inline_init(const struct rte_cryptodev *dev);
+
+#endif /* _OTX2_CRYPTODEV_MBOX_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/octeontx2/otx2_cryptodev_ops.c b/src/spdk/dpdk/drivers/crypto/octeontx2/otx2_cryptodev_ops.c
new file mode 100644
index 000000000..ad292a08f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/octeontx2/otx2_cryptodev_ops.c
@@ -0,0 +1,1251 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) 2019 Marvell International Ltd.
+ */
+
+#include <unistd.h>
+
+#include <rte_cryptodev_pmd.h>
+#include <rte_errno.h>
+#include <rte_ethdev.h>
+
+#include "otx2_cryptodev.h"
+#include "otx2_cryptodev_capabilities.h"
+#include "otx2_cryptodev_hw_access.h"
+#include "otx2_cryptodev_mbox.h"
+#include "otx2_cryptodev_ops.h"
+#include "otx2_mbox.h"
+#include "otx2_sec_idev.h"
+
+#include "cpt_hw_types.h"
+#include "cpt_pmd_logs.h"
+#include "cpt_pmd_ops_helper.h"
+#include "cpt_ucode.h"
+#include "cpt_ucode_asym.h"
+
+#define METABUF_POOL_CACHE_SIZE 512
+
+static uint64_t otx2_fpm_iova[CPT_EC_ID_PMAX];
+
+/* Forward declarations */
+
+static int
+otx2_cpt_queue_pair_release(struct rte_cryptodev *dev, uint16_t qp_id);
+
+static void
+qp_memzone_name_get(char *name, int size, int dev_id, int qp_id)
+{
+ snprintf(name, size, "otx2_cpt_lf_mem_%u:%u", dev_id, qp_id);
+}
+
+static int
+otx2_cpt_metabuf_mempool_create(const struct rte_cryptodev *dev,
+ struct otx2_cpt_qp *qp, uint8_t qp_id,
+ int nb_elements)
+{
+ char mempool_name[RTE_MEMPOOL_NAMESIZE];
+ struct cpt_qp_meta_info *meta_info;
+ struct rte_mempool *pool;
+ int ret, max_mlen;
+ int asym_mlen = 0;
+ int lb_mlen = 0;
+ int sg_mlen = 0;
+
+ if (dev->feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO) {
+
+ /* Get meta len for scatter gather mode */
+ sg_mlen = cpt_pmd_ops_helper_get_mlen_sg_mode();
+
+		/* Extra 32B reserved for future use */
+ sg_mlen += 4 * sizeof(uint64_t);
+
+ /* Get meta len for linear buffer (direct) mode */
+ lb_mlen = cpt_pmd_ops_helper_get_mlen_direct_mode();
+
+		/* Extra 32B reserved for future use */
+ lb_mlen += 4 * sizeof(uint64_t);
+ }
+
+ if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) {
+
+ /* Get meta len required for asymmetric operations */
+ asym_mlen = cpt_pmd_ops_helper_asym_get_mlen();
+ }
+
+ /*
+ * Check max requirement for meta buffer to
+ * support crypto op of any type (sym/asym).
+ */
+ max_mlen = RTE_MAX(RTE_MAX(lb_mlen, sg_mlen), asym_mlen);
+
+ /* Allocate mempool */
+
+ snprintf(mempool_name, RTE_MEMPOOL_NAMESIZE, "otx2_cpt_mb_%u:%u",
+ dev->data->dev_id, qp_id);
+
+ pool = rte_mempool_create_empty(mempool_name, nb_elements, max_mlen,
+ METABUF_POOL_CACHE_SIZE, 0,
+ rte_socket_id(), 0);
+
+ if (pool == NULL) {
+ CPT_LOG_ERR("Could not create mempool for metabuf");
+ return rte_errno;
+ }
+
+ ret = rte_mempool_set_ops_byname(pool, RTE_MBUF_DEFAULT_MEMPOOL_OPS,
+ NULL);
+ if (ret) {
+ CPT_LOG_ERR("Could not set mempool ops");
+ goto mempool_free;
+ }
+
+ ret = rte_mempool_populate_default(pool);
+ if (ret <= 0) {
+ CPT_LOG_ERR("Could not populate metabuf pool");
+ goto mempool_free;
+ }
+
+ meta_info = &qp->meta_info;
+
+ meta_info->pool = pool;
+ meta_info->lb_mlen = lb_mlen;
+ meta_info->sg_mlen = sg_mlen;
+
+ return 0;
+
+mempool_free:
+ rte_mempool_free(pool);
+ return ret;
+}
+
+static void
+otx2_cpt_metabuf_mempool_destroy(struct otx2_cpt_qp *qp)
+{
+ struct cpt_qp_meta_info *meta_info = &qp->meta_info;
+
+ rte_mempool_free(meta_info->pool);
+
+ meta_info->pool = NULL;
+ meta_info->lb_mlen = 0;
+ meta_info->sg_mlen = 0;
+}
+
+static int
+otx2_cpt_qp_inline_cfg(const struct rte_cryptodev *dev, struct otx2_cpt_qp *qp)
+{
+ static rte_atomic16_t port_offset = RTE_ATOMIC16_INIT(-1);
+ uint16_t port_id, nb_ethport = rte_eth_dev_count_avail();
+ int i, ret;
+
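+	/*
+	 * Walk the available ethdev ports round-robin (via the shared
+	 * atomic offset) and pick the first security-capable one.
+	 */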
+ for (i = 0; i < nb_ethport; i++) {
+ port_id = rte_atomic16_add_return(&port_offset, 1) % nb_ethport;
+ if (otx2_eth_dev_is_sec_capable(&rte_eth_devices[port_id]))
+ break;
+ }
+
+ if (i >= nb_ethport)
+ return 0;
+
+ ret = otx2_cpt_qp_ethdev_bind(dev, qp, port_id);
+ if (ret)
+ return ret;
+
+ /* Publish inline Tx QP to eth dev security */
+ ret = otx2_sec_idev_tx_cpt_qp_add(port_id, qp);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static struct otx2_cpt_qp *
+otx2_cpt_qp_create(const struct rte_cryptodev *dev, uint16_t qp_id,
+ uint8_t group)
+{
+ struct otx2_cpt_vf *vf = dev->data->dev_private;
+ uint64_t pg_sz = sysconf(_SC_PAGESIZE);
+ const struct rte_memzone *lf_mem;
+ uint32_t len, iq_len, size_div40;
+ char name[RTE_MEMZONE_NAMESIZE];
+ uint64_t used_len, iova;
+ struct otx2_cpt_qp *qp;
+ uint64_t lmtline;
+ uint8_t *va;
+ int ret;
+
+ /* Allocate queue pair */
+ qp = rte_zmalloc_socket("OCTEON TX2 Crypto PMD Queue Pair", sizeof(*qp),
+ OTX2_ALIGN, 0);
+ if (qp == NULL) {
+ CPT_LOG_ERR("Could not allocate queue pair");
+ return NULL;
+ }
+
+ iq_len = OTX2_CPT_IQ_LEN;
+
+	/*
+	 * The queue size must be a multiple of 40, and the effective queue
+	 * size available to software is (size_div40 - 1) * 40.
+	 */
+ size_div40 = (iq_len + 40 - 1) / 40 + 1;
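+
+	/*
+	 * Worked example: with iq_len = OTX2_CPT_IQ_LEN = 8200,
+	 * size_div40 = 8239 / 40 + 1 = 205 + 1 = 206, so the effective
+	 * queue size available to software is (206 - 1) * 40 = 8200.
+	 */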
+
+ /* For pending queue */
+ len = iq_len * RTE_ALIGN(sizeof(struct rid), 8);
+
+ /* Space for instruction group memory */
+ len += size_div40 * 16;
+
+	/* So that the instruction queue starts page-size aligned */
+ len = RTE_ALIGN(len, pg_sz);
+
+ /* For instruction queues */
+ len += OTX2_CPT_IQ_LEN * sizeof(union cpt_inst_s);
+
+	/* Padding (wastage) after the instruction queue */
+ len = RTE_ALIGN(len, pg_sz);
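+
+	/*
+	 * Final layout of the memzone: pending queue entries, instruction
+	 * group memory, padding up to page size, instruction queue, then
+	 * padding up to page size again.
+	 */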
+
+ qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
+ qp_id);
+
+ lf_mem = rte_memzone_reserve_aligned(name, len, vf->otx2_dev.node,
+ RTE_MEMZONE_SIZE_HINT_ONLY | RTE_MEMZONE_256MB,
+ RTE_CACHE_LINE_SIZE);
+ if (lf_mem == NULL) {
+ CPT_LOG_ERR("Could not allocate reserved memzone");
+ goto qp_free;
+ }
+
+ va = lf_mem->addr;
+ iova = lf_mem->iova;
+
+ memset(va, 0, len);
+
+ ret = otx2_cpt_metabuf_mempool_create(dev, qp, qp_id, iq_len);
+ if (ret) {
+ CPT_LOG_ERR("Could not create mempool for metabuf");
+ goto lf_mem_free;
+ }
+
+ /* Initialize pending queue */
+ qp->pend_q.rid_queue = (struct rid *)va;
+ qp->pend_q.enq_tail = 0;
+ qp->pend_q.deq_head = 0;
+ qp->pend_q.pending_count = 0;
+
+ used_len = iq_len * RTE_ALIGN(sizeof(struct rid), 8);
+ used_len += size_div40 * 16;
+ used_len = RTE_ALIGN(used_len, pg_sz);
+ iova += used_len;
+
+ qp->iq_dma_addr = iova;
+ qp->id = qp_id;
+ qp->base = OTX2_CPT_LF_BAR2(vf, qp_id);
+
+ lmtline = vf->otx2_dev.bar2 +
+ (RVU_BLOCK_ADDR_LMT << 20 | qp_id << 12) +
+ OTX2_LMT_LF_LMTLINE(0);
+
+ qp->lmtline = (void *)lmtline;
+
+ qp->lf_nq_reg = qp->base + OTX2_CPT_LF_NQ(0);
+
+ ret = otx2_sec_idev_tx_cpt_qp_remove(qp);
+ if (ret && (ret != -ENOENT)) {
+ CPT_LOG_ERR("Could not delete inline configuration");
+ goto mempool_destroy;
+ }
+
+ otx2_cpt_iq_disable(qp);
+
+ ret = otx2_cpt_qp_inline_cfg(dev, qp);
+ if (ret) {
+ CPT_LOG_ERR("Could not configure queue for inline IPsec");
+ goto mempool_destroy;
+ }
+
+ ret = otx2_cpt_iq_enable(dev, qp, group, OTX2_CPT_QUEUE_HI_PRIO,
+ size_div40);
+ if (ret) {
+ CPT_LOG_ERR("Could not enable instruction queue");
+ goto mempool_destroy;
+ }
+
+ return qp;
+
+mempool_destroy:
+ otx2_cpt_metabuf_mempool_destroy(qp);
+lf_mem_free:
+ rte_memzone_free(lf_mem);
+qp_free:
+ rte_free(qp);
+ return NULL;
+}
+
+static int
+otx2_cpt_qp_destroy(const struct rte_cryptodev *dev, struct otx2_cpt_qp *qp)
+{
+ const struct rte_memzone *lf_mem;
+ char name[RTE_MEMZONE_NAMESIZE];
+ int ret;
+
+ ret = otx2_sec_idev_tx_cpt_qp_remove(qp);
+ if (ret && (ret != -ENOENT)) {
+ CPT_LOG_ERR("Could not delete inline configuration");
+ return ret;
+ }
+
+ otx2_cpt_iq_disable(qp);
+
+ otx2_cpt_metabuf_mempool_destroy(qp);
+
+ qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
+ qp->id);
+
+ lf_mem = rte_memzone_lookup(name);
+
+ ret = rte_memzone_free(lf_mem);
+ if (ret)
+ return ret;
+
+ rte_free(qp);
+
+ return 0;
+}
+
+static int
+sym_xform_verify(struct rte_crypto_sym_xform *xform)
+{
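+	/*
+	 * Supported chained orders are cipher (encrypt) followed by auth,
+	 * and auth followed by cipher (decrypt); the converse orders are
+	 * rejected here, as is 3DES-CBC chained with plain SHA1 in either
+	 * order.
+	 */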
+ if (xform->next) {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ return -ENOTSUP;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return -ENOTSUP;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->auth.algo == RTE_CRYPTO_AUTH_SHA1)
+ return -ENOTSUP;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->auth.algo == RTE_CRYPTO_AUTH_SHA1 &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC)
+ return -ENOTSUP;
+
+ } else {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->auth.algo == RTE_CRYPTO_AUTH_NULL &&
+ xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
+ return -ENOTSUP;
+ }
+ return 0;
+}
+
+static int
+sym_session_configure(int driver_id, struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *pool)
+{
+ struct cpt_sess_misc *misc;
+ void *priv;
+ int ret;
+
+ ret = sym_xform_verify(xform);
+ if (unlikely(ret))
+ return ret;
+
+ if (unlikely(rte_mempool_get(pool, &priv))) {
+ CPT_LOG_ERR("Could not allocate session private data");
+ return -ENOMEM;
+ }
+
+ misc = priv;
+
+ for ( ; xform != NULL; xform = xform->next) {
+ switch (xform->type) {
+ case RTE_CRYPTO_SYM_XFORM_AEAD:
+ ret = fill_sess_aead(xform, misc);
+ break;
+ case RTE_CRYPTO_SYM_XFORM_CIPHER:
+ ret = fill_sess_cipher(xform, misc);
+ break;
+ case RTE_CRYPTO_SYM_XFORM_AUTH:
+ if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC)
+ ret = fill_sess_gmac(xform, misc);
+ else
+ ret = fill_sess_auth(xform, misc);
+ break;
+ default:
+ ret = -1;
+ }
+
+ if (ret)
+ goto priv_put;
+ }
+
+ set_sym_session_private_data(sess, driver_id, misc);
+
+ misc->ctx_dma_addr = rte_mempool_virt2iova(misc) +
+ sizeof(struct cpt_sess_misc);
+
+ /*
+ * IE engines support IPsec operations
+ * SE engines support IPsec operations and Air-Crypto operations
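+	 * zsk_flag marks air-crypto (ZUC/SNOW 3G/KASUMI) sessions, which
+	 * must therefore be routed to the SE-only group.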
+ */
+ if (misc->zsk_flag)
+ misc->egrp = OTX2_CPT_EGRP_SE;
+ else
+ misc->egrp = OTX2_CPT_EGRP_SE_IE;
+
+ return 0;
+
+priv_put:
+ rte_mempool_put(pool, priv);
+
+ return -ENOTSUP;
+}
+
+static void
+sym_session_clear(int driver_id, struct rte_cryptodev_sym_session *sess)
+{
+ void *priv = get_sym_session_private_data(sess, driver_id);
+ struct rte_mempool *pool;
+
+ if (priv == NULL)
+ return;
+
+ memset(priv, 0, cpt_get_session_size());
+
+ pool = rte_mempool_from_obj(priv);
+
+ set_sym_session_private_data(sess, driver_id, NULL);
+
+ rte_mempool_put(pool, priv);
+}
+
+static __rte_always_inline int32_t __rte_hot
+otx2_cpt_enqueue_req(const struct otx2_cpt_qp *qp,
+ struct pending_queue *pend_q,
+ struct cpt_request_info *req)
+{
+ void *lmtline = qp->lmtline;
+ union cpt_inst_s inst;
+ uint64_t lmt_status;
+
+ if (unlikely(pend_q->pending_count >= OTX2_CPT_DEFAULT_CMD_QLEN))
+ return -EAGAIN;
+
+ inst.u[0] = 0;
+ inst.s9x.res_addr = req->comp_baddr;
+ inst.u[2] = 0;
+ inst.u[3] = 0;
+
+ inst.s9x.ei0 = req->ist.ei0;
+ inst.s9x.ei1 = req->ist.ei1;
+ inst.s9x.ei2 = req->ist.ei2;
+ inst.s9x.ei3 = req->ist.ei3;
+
+ req->time_out = rte_get_timer_cycles() +
+ DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();
+
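+	/*
+	 * otx2_lmt_submit() returns 0 when the LMTST did not complete;
+	 * in that case the command is copied to the LMTLINE and
+	 * submitted again until it takes effect.
+	 */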
+ do {
+ /* Copy CPT command to LMTLINE */
+ memcpy(lmtline, &inst, sizeof(inst));
+
+ /*
+ * Make sure compiler does not reorder memcpy and ldeor.
+ * LMTST transactions are always flushed from the write
+ * buffer immediately, a DMB is not required to push out
+ * LMTSTs.
+ */
+ rte_cio_wmb();
+ lmt_status = otx2_lmt_submit(qp->lf_nq_reg);
+ } while (lmt_status == 0);
+
+ pend_q->rid_queue[pend_q->enq_tail].rid = (uintptr_t)req;
+
+ /* We will use soft queue length here to limit requests */
+ MOD_INC(pend_q->enq_tail, OTX2_CPT_DEFAULT_CMD_QLEN);
+ pend_q->pending_count += 1;
+
+ return 0;
+}
+
+static __rte_always_inline int32_t __rte_hot
+otx2_cpt_enqueue_asym(struct otx2_cpt_qp *qp,
+ struct rte_crypto_op *op,
+ struct pending_queue *pend_q)
+{
+ struct cpt_qp_meta_info *minfo = &qp->meta_info;
+ struct rte_crypto_asym_op *asym_op = op->asym;
+ struct asym_op_params params = {0};
+ struct cpt_asym_sess_misc *sess;
+ vq_cmd_word3_t *w3;
+ uintptr_t *cop;
+ void *mdata;
+ int ret;
+
+ if (unlikely(rte_mempool_get(minfo->pool, &mdata) < 0)) {
+ CPT_LOG_ERR("Could not allocate meta buffer for request");
+ return -ENOMEM;
+ }
+
+ sess = get_asym_session_private_data(asym_op->session,
+ otx2_cryptodev_driver_id);
+
+ /* Store IO address of the mdata to meta_buf */
+ params.meta_buf = rte_mempool_virt2iova(mdata);
+
+ cop = mdata;
+ cop[0] = (uintptr_t)mdata;
+ cop[1] = (uintptr_t)op;
+ cop[2] = cop[3] = 0ULL;
+
+ params.req = RTE_PTR_ADD(cop, 4 * sizeof(uintptr_t));
+ params.req->op = cop;
+
+ /* Adjust meta_buf to point to end of cpt_request_info structure */
+ params.meta_buf += (4 * sizeof(uintptr_t)) +
+ sizeof(struct cpt_request_info);
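+
+	/*
+	 * Meta buffer layout: four pointer-sized words (mdata, op, 0, 0),
+	 * then the cpt_request_info, then scratch space for the prep
+	 * routines, addressed via params.meta_buf.
+	 */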
+ switch (sess->xfrm_type) {
+ case RTE_CRYPTO_ASYM_XFORM_MODEX:
+ ret = cpt_modex_prep(&params, &sess->mod_ctx);
+ if (unlikely(ret))
+ goto req_fail;
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_RSA:
+ ret = cpt_enqueue_rsa_op(op, &params, sess);
+ if (unlikely(ret))
+ goto req_fail;
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_ECDSA:
+ ret = cpt_enqueue_ecdsa_op(op, &params, sess, otx2_fpm_iova);
+ if (unlikely(ret))
+ goto req_fail;
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_ECPM:
+ ret = cpt_ecpm_prep(&asym_op->ecpm, &params,
+ sess->ec_ctx.curveid);
+ if (unlikely(ret))
+ goto req_fail;
+ break;
+ default:
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ ret = -EINVAL;
+ goto req_fail;
+ }
+
+ /* Set engine group of AE */
+ w3 = (vq_cmd_word3_t *)&params.req->ist.ei3;
+ w3->s.grp = OTX2_CPT_EGRP_AE;
+
+ ret = otx2_cpt_enqueue_req(qp, pend_q, params.req);
+
+ if (unlikely(ret)) {
+ CPT_LOG_DP_ERR("Could not enqueue crypto req");
+ goto req_fail;
+ }
+
+ return 0;
+
+req_fail:
+ free_op_meta(mdata, minfo->pool);
+
+ return ret;
+}
+
+static __rte_always_inline int __rte_hot
+otx2_cpt_enqueue_sym(struct otx2_cpt_qp *qp, struct rte_crypto_op *op,
+ struct pending_queue *pend_q)
+{
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct cpt_request_info *req;
+ struct cpt_sess_misc *sess;
+ vq_cmd_word3_t *w3;
+ uint64_t cpt_op;
+ void *mdata;
+ int ret;
+
+ sess = get_sym_session_private_data(sym_op->session,
+ otx2_cryptodev_driver_id);
+
+ cpt_op = sess->cpt_op;
+
+ if (cpt_op & CPT_OP_CIPHER_MASK)
+ ret = fill_fc_params(op, sess, &qp->meta_info, &mdata,
+ (void **)&req);
+ else
+ ret = fill_digest_params(op, sess, &qp->meta_info, &mdata,
+ (void **)&req);
+
+ if (unlikely(ret)) {
+ CPT_LOG_DP_ERR("Crypto req : op %p, cpt_op 0x%x ret 0x%x",
+ op, (unsigned int)cpt_op, ret);
+ return ret;
+ }
+
+ w3 = ((vq_cmd_word3_t *)(&req->ist.ei3));
+ w3->s.grp = sess->egrp;
+
+ ret = otx2_cpt_enqueue_req(qp, pend_q, req);
+
+ if (unlikely(ret)) {
+ /* Free buffer allocated by fill params routines */
+ free_op_meta(mdata, qp->meta_info.pool);
+ }
+
+ return ret;
+}
+
+static __rte_always_inline int __rte_hot
+otx2_cpt_enqueue_sym_sessless(struct otx2_cpt_qp *qp, struct rte_crypto_op *op,
+ struct pending_queue *pend_q)
+{
+ const int driver_id = otx2_cryptodev_driver_id;
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct rte_cryptodev_sym_session *sess;
+ int ret;
+
+ /* Create temporary session */
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&sess))
+ return -ENOMEM;
+
+ ret = sym_session_configure(driver_id, sym_op->xform, sess,
+ qp->sess_mp_priv);
+ if (ret)
+ goto sess_put;
+
+ sym_op->session = sess;
+
+ ret = otx2_cpt_enqueue_sym(qp, op, pend_q);
+
+ if (unlikely(ret))
+ goto priv_put;
+
+ return 0;
+
+priv_put:
+ sym_session_clear(driver_id, sess);
+sess_put:
+ rte_mempool_put(qp->sess_mp, sess);
+ return ret;
+}
+
+static uint16_t
+otx2_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ uint16_t nb_allowed, count = 0;
+ struct otx2_cpt_qp *qp = qptr;
+ struct pending_queue *pend_q;
+ struct rte_crypto_op *op;
+ int ret;
+
+ pend_q = &qp->pend_q;
+
+ nb_allowed = OTX2_CPT_DEFAULT_CMD_QLEN - pend_q->pending_count;
+ if (nb_ops > nb_allowed)
+ nb_ops = nb_allowed;
+
+ for (count = 0; count < nb_ops; count++) {
+ op = ops[count];
+ if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+ ret = otx2_cpt_enqueue_sym(qp, op, pend_q);
+ else
+ ret = otx2_cpt_enqueue_sym_sessless(qp, op,
+ pend_q);
+ } else if (op->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+ ret = otx2_cpt_enqueue_asym(qp, op, pend_q);
+ else
+ break;
+ } else
+ break;
+
+ if (unlikely(ret))
+ break;
+ }
+
+ return count;
+}
+
+static __rte_always_inline void
+otx2_cpt_asym_rsa_op(struct rte_crypto_op *cop, struct cpt_request_info *req,
+ struct rte_crypto_rsa_xform *rsa_ctx)
+{
+ struct rte_crypto_rsa_op_param *rsa = &cop->asym->rsa;
+
+ switch (rsa->op_type) {
+ case RTE_CRYPTO_ASYM_OP_ENCRYPT:
+ rsa->cipher.length = rsa_ctx->n.length;
+ memcpy(rsa->cipher.data, req->rptr, rsa->cipher.length);
+ break;
+ case RTE_CRYPTO_ASYM_OP_DECRYPT:
+ if (rsa->pad == RTE_CRYPTO_RSA_PADDING_NONE) {
+ rsa->message.length = rsa_ctx->n.length;
+ memcpy(rsa->message.data, req->rptr,
+ rsa->message.length);
+ } else {
+ /* Get length of decrypted output */
+ rsa->message.length = rte_cpu_to_be_16
+ (*((uint16_t *)req->rptr));
+ /*
+ * Offset output data pointer by length field
+ * (2 bytes) and copy decrypted data.
+ */
+ memcpy(rsa->message.data, req->rptr + 2,
+ rsa->message.length);
+ }
+ break;
+ case RTE_CRYPTO_ASYM_OP_SIGN:
+ rsa->sign.length = rsa_ctx->n.length;
+ memcpy(rsa->sign.data, req->rptr, rsa->sign.length);
+ break;
+ case RTE_CRYPTO_ASYM_OP_VERIFY:
+ if (rsa->pad == RTE_CRYPTO_RSA_PADDING_NONE) {
+ rsa->sign.length = rsa_ctx->n.length;
+ memcpy(rsa->sign.data, req->rptr, rsa->sign.length);
+ } else {
+			/* Get length of the recovered message */
+			rsa->sign.length = rte_be_to_cpu_16
+					   (*((uint16_t *)req->rptr));
+			/*
+			 * Offset output data pointer by length field
+			 * (2 bytes) and copy the recovered message.
+			 */
+			memcpy(rsa->sign.data, req->rptr + 2,
+			       rsa->sign.length);
+ }
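+		/* Authenticate: the recovered data must match the input message */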
+ if (memcmp(rsa->sign.data, rsa->message.data,
+ rsa->message.length)) {
+ CPT_LOG_DP_ERR("RSA verification failed");
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ }
+ break;
+ default:
+ CPT_LOG_DP_DEBUG("Invalid RSA operation type");
+ cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ break;
+ }
+}
+
+static __rte_always_inline void
+otx2_cpt_asym_dequeue_ecdsa_op(struct rte_crypto_ecdsa_op_param *ecdsa,
+ struct cpt_request_info *req,
+ struct cpt_asym_ec_ctx *ec)
+{
+ int prime_len = ec_grp[ec->curveid].prime.length;
+
+ if (ecdsa->op_type == RTE_CRYPTO_ASYM_OP_VERIFY)
+ return;
+
+ /* Separate out sign r and s components */
+ memcpy(ecdsa->r.data, req->rptr, prime_len);
+ memcpy(ecdsa->s.data, req->rptr + ROUNDUP8(prime_len), prime_len);
+ ecdsa->r.length = prime_len;
+ ecdsa->s.length = prime_len;
+}
+
+static __rte_always_inline void
+otx2_cpt_asym_dequeue_ecpm_op(struct rte_crypto_ecpm_op_param *ecpm,
+ struct cpt_request_info *req,
+ struct cpt_asym_ec_ctx *ec)
+{
+ int prime_len = ec_grp[ec->curveid].prime.length;
+
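+	/* Result point: y follows x at the next 8-byte aligned offset */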
+ memcpy(ecpm->r.x.data, req->rptr, prime_len);
+ memcpy(ecpm->r.y.data, req->rptr + ROUNDUP8(prime_len), prime_len);
+ ecpm->r.x.length = prime_len;
+ ecpm->r.y.length = prime_len;
+}
+
+static void
+otx2_cpt_asym_post_process(struct rte_crypto_op *cop,
+ struct cpt_request_info *req)
+{
+ struct rte_crypto_asym_op *op = cop->asym;
+ struct cpt_asym_sess_misc *sess;
+
+ sess = get_asym_session_private_data(op->session,
+ otx2_cryptodev_driver_id);
+
+ switch (sess->xfrm_type) {
+ case RTE_CRYPTO_ASYM_XFORM_RSA:
+ otx2_cpt_asym_rsa_op(cop, req, &sess->rsa_ctx);
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_MODEX:
+ op->modex.result.length = sess->mod_ctx.modulus.length;
+ memcpy(op->modex.result.data, req->rptr,
+ op->modex.result.length);
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_ECDSA:
+ otx2_cpt_asym_dequeue_ecdsa_op(&op->ecdsa, req, &sess->ec_ctx);
+ break;
+ case RTE_CRYPTO_ASYM_XFORM_ECPM:
+ otx2_cpt_asym_dequeue_ecpm_op(&op->ecpm, req, &sess->ec_ctx);
+ break;
+ default:
+ CPT_LOG_DP_DEBUG("Invalid crypto xform type");
+ cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ break;
+ }
+}
+
+static inline void
+otx2_cpt_dequeue_post_process(struct otx2_cpt_qp *qp, struct rte_crypto_op *cop,
+ uintptr_t *rsp, uint8_t cc)
+{
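+	/*
+	 * rsp[0]: metabuf, rsp[1]: crypto op, rsp[2]/rsp[3]: computed
+	 * digest address and length when auth verification is pending.
+	 */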
+ if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
+ if (likely(cc == NO_ERR)) {
+ /* Verify authentication data if required */
+ if (unlikely(rsp[2]))
+ compl_auth_verify(cop, (uint8_t *)rsp[2],
+ rsp[3]);
+ else
+ cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ } else {
+ if (cc == ERR_GC_ICV_MISCOMPARE)
+ cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ else
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ }
+
+ if (unlikely(cop->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
+ sym_session_clear(otx2_cryptodev_driver_id,
+ cop->sym->session);
+ rte_mempool_put(qp->sess_mp, cop->sym->session);
+ cop->sym->session = NULL;
+ }
+ }
+
+ if (cop->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
+ if (likely(cc == NO_ERR)) {
+ cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ /*
+ * Pass cpt_req_info stored in metabuf during
+ * enqueue.
+ */
+ rsp = RTE_PTR_ADD(rsp, 4 * sizeof(uintptr_t));
+ otx2_cpt_asym_post_process(cop,
+ (struct cpt_request_info *)rsp);
+ } else
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ }
+}
+
+static __rte_always_inline uint8_t
+otx2_cpt_compcode_get(struct cpt_request_info *req)
+{
+ volatile struct cpt_res_s_9s *res;
+ uint8_t ret;
+
+ res = (volatile struct cpt_res_s_9s *)req->completion_addr;
+
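+	/* NOTDONE before the deadline means the request is still in flight */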
+ if (unlikely(res->compcode == CPT_9X_COMP_E_NOTDONE)) {
+ if (rte_get_timer_cycles() < req->time_out)
+ return ERR_REQ_PENDING;
+
+ CPT_LOG_DP_ERR("Request timed out");
+ return ERR_REQ_TIMEOUT;
+ }
+
+ if (likely(res->compcode == CPT_9X_COMP_E_GOOD)) {
+ ret = NO_ERR;
+ if (unlikely(res->uc_compcode)) {
+ ret = res->uc_compcode;
+ CPT_LOG_DP_DEBUG("Request failed with microcode error");
+ CPT_LOG_DP_DEBUG("MC completion code 0x%x",
+ res->uc_compcode);
+ }
+ } else {
+ CPT_LOG_DP_DEBUG("HW completion code 0x%x", res->compcode);
+
+ ret = res->compcode;
+ switch (res->compcode) {
+ case CPT_9X_COMP_E_INSTERR:
+ CPT_LOG_DP_ERR("Request failed with instruction error");
+ break;
+ case CPT_9X_COMP_E_FAULT:
+ CPT_LOG_DP_ERR("Request failed with DMA fault");
+ break;
+ case CPT_9X_COMP_E_HWERR:
+ CPT_LOG_DP_ERR("Request failed with hardware error");
+ break;
+ default:
+ CPT_LOG_DP_ERR("Request failed with unknown completion code");
+ }
+ }
+
+ return ret;
+}
+
+static uint16_t
+otx2_cpt_dequeue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ int i, nb_pending, nb_completed;
+ struct otx2_cpt_qp *qp = qptr;
+ struct pending_queue *pend_q;
+ struct cpt_request_info *req;
+ struct rte_crypto_op *cop;
+ uint8_t cc[nb_ops];
+ struct rid *rid;
+ uintptr_t *rsp;
+ void *metabuf;
+
+ pend_q = &qp->pend_q;
+
+ nb_pending = pend_q->pending_count;
+
+ if (nb_ops > nb_pending)
+ nb_ops = nb_pending;
+
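+	/* First pass: pop completed requests from the pending queue in order */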
+ for (i = 0; i < nb_ops; i++) {
+ rid = &pend_q->rid_queue[pend_q->deq_head];
+ req = (struct cpt_request_info *)(rid->rid);
+
+ cc[i] = otx2_cpt_compcode_get(req);
+
+ if (unlikely(cc[i] == ERR_REQ_PENDING))
+ break;
+
+ ops[i] = req->op;
+
+ MOD_INC(pend_q->deq_head, OTX2_CPT_DEFAULT_CMD_QLEN);
+ pend_q->pending_count -= 1;
+ }
+
+ nb_completed = i;
+
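+	/* Second pass: post-process each completed op and free its metabuf */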
+ for (i = 0; i < nb_completed; i++) {
+ rsp = (void *)ops[i];
+
+ metabuf = (void *)rsp[0];
+ cop = (void *)rsp[1];
+
+ ops[i] = cop;
+
+ otx2_cpt_dequeue_post_process(qp, cop, rsp, cc[i]);
+
+ free_op_meta(metabuf, qp->meta_info.pool);
+ }
+
+ return nb_completed;
+}
+
+/* PMD ops */
+
+static int
+otx2_cpt_dev_config(struct rte_cryptodev *dev,
+ struct rte_cryptodev_config *conf)
+{
+ struct otx2_cpt_vf *vf = dev->data->dev_private;
+ int ret;
+
+ if (conf->nb_queue_pairs > vf->max_queues) {
+ CPT_LOG_ERR("Invalid number of queue pairs requested");
+ return -EINVAL;
+ }
+
+ dev->feature_flags &= ~conf->ff_disable;
+
+ if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) {
+ /* Initialize shared FPM table */
+ ret = cpt_fpm_init(otx2_fpm_iova);
+ if (ret)
+ return ret;
+ }
+
+ /* Unregister error interrupts */
+ if (vf->err_intr_registered)
+ otx2_cpt_err_intr_unregister(dev);
+
+ /* Detach queues */
+ if (vf->nb_queues) {
+ ret = otx2_cpt_queues_detach(dev);
+ if (ret) {
+ CPT_LOG_ERR("Could not detach CPT queues");
+ return ret;
+ }
+ }
+
+ /* Attach queues */
+ ret = otx2_cpt_queues_attach(dev, conf->nb_queue_pairs);
+ if (ret) {
+ CPT_LOG_ERR("Could not attach CPT queues");
+ return -ENODEV;
+ }
+
+ ret = otx2_cpt_msix_offsets_get(dev);
+ if (ret) {
+ CPT_LOG_ERR("Could not get MSI-X offsets");
+ goto queues_detach;
+ }
+
+ /* Register error interrupts */
+ ret = otx2_cpt_err_intr_register(dev);
+ if (ret) {
+ CPT_LOG_ERR("Could not register error interrupts");
+ goto queues_detach;
+ }
+
+ ret = otx2_cpt_inline_init(dev);
+ if (ret) {
+ CPT_LOG_ERR("Could not enable inline IPsec");
+ goto intr_unregister;
+ }
+
+ dev->enqueue_burst = otx2_cpt_enqueue_burst;
+ dev->dequeue_burst = otx2_cpt_dequeue_burst;
+
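+	/* Ensure the burst handlers are visible before configure returns */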
+ rte_mb();
+ return 0;
+
+intr_unregister:
+ otx2_cpt_err_intr_unregister(dev);
+queues_detach:
+ otx2_cpt_queues_detach(dev);
+ return ret;
+}
+
+static int
+otx2_cpt_dev_start(struct rte_cryptodev *dev)
+{
+ RTE_SET_USED(dev);
+
+ CPT_PMD_INIT_FUNC_TRACE();
+
+ return 0;
+}
+
+static void
+otx2_cpt_dev_stop(struct rte_cryptodev *dev)
+{
+ CPT_PMD_INIT_FUNC_TRACE();
+
+ if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO)
+ cpt_fpm_clear();
+}
+
+static int
+otx2_cpt_dev_close(struct rte_cryptodev *dev)
+{
+ struct otx2_cpt_vf *vf = dev->data->dev_private;
+ int i, ret = 0;
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ ret = otx2_cpt_queue_pair_release(dev, i);
+ if (ret)
+ return ret;
+ }
+
+ /* Unregister error interrupts */
+ if (vf->err_intr_registered)
+ otx2_cpt_err_intr_unregister(dev);
+
+ /* Detach queues */
+ if (vf->nb_queues) {
+ ret = otx2_cpt_queues_detach(dev);
+ if (ret)
+ CPT_LOG_ERR("Could not detach CPT queues");
+ }
+
+ return ret;
+}
+
+static void
+otx2_cpt_dev_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *info)
+{
+ struct otx2_cpt_vf *vf = dev->data->dev_private;
+
+ if (info != NULL) {
+ info->max_nb_queue_pairs = vf->max_queues;
+ info->feature_flags = dev->feature_flags;
+ info->capabilities = otx2_cpt_capabilities_get();
+ info->sym.max_nb_sessions = 0;
+ info->driver_id = otx2_cryptodev_driver_id;
+ info->min_mbuf_headroom_req = OTX2_CPT_MIN_HEADROOM_REQ;
+ info->min_mbuf_tailroom_req = OTX2_CPT_MIN_TAILROOM_REQ;
+ }
+}
+
+static int
+otx2_cpt_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *conf,
+ int socket_id __rte_unused)
+{
+ uint8_t grp_mask = OTX2_CPT_ENG_GRPS_MASK;
+ struct rte_pci_device *pci_dev;
+ struct otx2_cpt_qp *qp;
+
+ CPT_PMD_INIT_FUNC_TRACE();
+
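+	/* Re-setup of an existing queue pair: release the old one first */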
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ otx2_cpt_queue_pair_release(dev, qp_id);
+
+ if (conf->nb_descriptors > OTX2_CPT_DEFAULT_CMD_QLEN) {
+ CPT_LOG_ERR("Could not setup queue pair for %u descriptors",
+ conf->nb_descriptors);
+ return -EINVAL;
+ }
+
+ pci_dev = RTE_DEV_TO_PCI(dev->device);
+
+ if (pci_dev->mem_resource[2].addr == NULL) {
+ CPT_LOG_ERR("Invalid PCI mem address");
+ return -EIO;
+ }
+
+ qp = otx2_cpt_qp_create(dev, qp_id, grp_mask);
+ if (qp == NULL) {
+ CPT_LOG_ERR("Could not create queue pair %d", qp_id);
+ return -ENOMEM;
+ }
+
+ qp->sess_mp = conf->mp_session;
+ qp->sess_mp_priv = conf->mp_session_private;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ return 0;
+}
+
+static int
+otx2_cpt_queue_pair_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ struct otx2_cpt_qp *qp = dev->data->queue_pairs[qp_id];
+ int ret;
+
+ CPT_PMD_INIT_FUNC_TRACE();
+
+ if (qp == NULL)
+ return -EINVAL;
+
+ CPT_LOG_INFO("Releasing queue pair %d", qp_id);
+
+ ret = otx2_cpt_qp_destroy(dev, qp);
+ if (ret) {
+ CPT_LOG_ERR("Could not destroy queue pair %d", qp_id);
+ return ret;
+ }
+
+ dev->data->queue_pairs[qp_id] = NULL;
+
+ return 0;
+}
+
+static unsigned int
+otx2_cpt_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return cpt_get_session_size();
+}
+
+static int
+otx2_cpt_sym_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *pool)
+{
+ CPT_PMD_INIT_FUNC_TRACE();
+
+ return sym_session_configure(dev->driver_id, xform, sess, pool);
+}
+
+static void
+otx2_cpt_sym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ CPT_PMD_INIT_FUNC_TRACE();
+
+ return sym_session_clear(dev->driver_id, sess);
+}
+
+static unsigned int
+otx2_cpt_asym_session_size_get(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct cpt_asym_sess_misc);
+}
+
+static int
+otx2_cpt_asym_session_cfg(struct rte_cryptodev *dev,
+ struct rte_crypto_asym_xform *xform,
+ struct rte_cryptodev_asym_session *sess,
+ struct rte_mempool *pool)
+{
+ struct cpt_asym_sess_misc *priv;
+ int ret;
+
+ CPT_PMD_INIT_FUNC_TRACE();
+
+ if (rte_mempool_get(pool, (void **)&priv)) {
+ CPT_LOG_ERR("Could not allocate session_private_data");
+ return -ENOMEM;
+ }
+
+ memset(priv, 0, sizeof(struct cpt_asym_sess_misc));
+
+ ret = cpt_fill_asym_session_parameters(priv, xform);
+ if (ret) {
+ CPT_LOG_ERR("Could not configure session parameters");
+
+ /* Return session to mempool */
+ rte_mempool_put(pool, priv);
+ return ret;
+ }
+
+ set_asym_session_private_data(sess, dev->driver_id, priv);
+ return 0;
+}
+
+static void
+otx2_cpt_asym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_asym_session *sess)
+{
+ struct cpt_asym_sess_misc *priv;
+ struct rte_mempool *sess_mp;
+
+ CPT_PMD_INIT_FUNC_TRACE();
+
+ priv = get_asym_session_private_data(sess, dev->driver_id);
+ if (priv == NULL)
+ return;
+
+ /* Free resources allocated in session_cfg */
+ cpt_free_asym_session_parameters(priv);
+
+ /* Reset and free object back to pool */
+ memset(priv, 0, otx2_cpt_asym_session_size_get(dev));
+ sess_mp = rte_mempool_from_obj(priv);
+ set_asym_session_private_data(sess, dev->driver_id, NULL);
+ rte_mempool_put(sess_mp, priv);
+}
+
+struct rte_cryptodev_ops otx2_cpt_ops = {
+ /* Device control ops */
+ .dev_configure = otx2_cpt_dev_config,
+ .dev_start = otx2_cpt_dev_start,
+ .dev_stop = otx2_cpt_dev_stop,
+ .dev_close = otx2_cpt_dev_close,
+ .dev_infos_get = otx2_cpt_dev_info_get,
+
+ .stats_get = NULL,
+ .stats_reset = NULL,
+ .queue_pair_setup = otx2_cpt_queue_pair_setup,
+ .queue_pair_release = otx2_cpt_queue_pair_release,
+
+ /* Symmetric crypto ops */
+ .sym_session_get_size = otx2_cpt_sym_session_get_size,
+ .sym_session_configure = otx2_cpt_sym_session_configure,
+ .sym_session_clear = otx2_cpt_sym_session_clear,
+
+ /* Asymmetric crypto ops */
+ .asym_session_get_size = otx2_cpt_asym_session_size_get,
+ .asym_session_configure = otx2_cpt_asym_session_cfg,
+ .asym_session_clear = otx2_cpt_asym_session_clear,
+};
diff --git a/src/spdk/dpdk/drivers/crypto/octeontx2/otx2_cryptodev_ops.h b/src/spdk/dpdk/drivers/crypto/octeontx2/otx2_cryptodev_ops.h
new file mode 100644
index 000000000..f83e36b48
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/octeontx2/otx2_cryptodev_ops.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) 2019 Marvell International Ltd.
+ */
+
+#ifndef _OTX2_CRYPTODEV_OPS_H_
+#define _OTX2_CRYPTODEV_OPS_H_
+
+#include <rte_cryptodev_pmd.h>
+
+#define OTX2_CPT_MIN_HEADROOM_REQ 24
+#define OTX2_CPT_MIN_TAILROOM_REQ 8
+
+enum otx2_cpt_egrp {
+ OTX2_CPT_EGRP_SE = 0,
+ OTX2_CPT_EGRP_SE_IE = 1,
+ OTX2_CPT_EGRP_AE = 2
+};
+
+extern struct rte_cryptodev_ops otx2_cpt_ops;
+
+#endif /* _OTX2_CRYPTODEV_OPS_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/octeontx2/otx2_cryptodev_qp.h b/src/spdk/dpdk/drivers/crypto/octeontx2/otx2_cryptodev_qp.h
new file mode 100644
index 000000000..9d48da45f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/octeontx2/otx2_cryptodev_qp.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef _OTX2_CRYPTODEV_QP_H_
+#define _OTX2_CRYPTODEV_QP_H_
+
+#include <rte_common.h>
+#include <rte_mempool.h>
+#include <rte_spinlock.h>
+
+#include "cpt_common.h"
+
+struct otx2_cpt_qp {
+ uint32_t id;
+ /**< Queue pair id */
+ uintptr_t base;
+ /**< Base address where BAR is mapped */
+ void *lmtline;
+ /**< Address of LMTLINE */
+ rte_iova_t lf_nq_reg;
+ /**< LF enqueue register address */
+ struct pending_queue pend_q;
+ /**< Pending queue */
+ struct rte_mempool *sess_mp;
+ /**< Session mempool */
+ struct rte_mempool *sess_mp_priv;
+ /**< Session private data mempool */
+ struct cpt_qp_meta_info meta_info;
+ /**< Metabuf info required to support operations on the queue pair */
+ rte_iova_t iq_dma_addr;
+ /**< Instruction queue address */
+};
+
+#endif /* _OTX2_CRYPTODEV_QP_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/octeontx2/otx2_ipsec_fp.h b/src/spdk/dpdk/drivers/crypto/octeontx2/otx2_ipsec_fp.h
new file mode 100644
index 000000000..52b3b41e2
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/octeontx2/otx2_ipsec_fp.h
@@ -0,0 +1,350 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2020 Marvell International Ltd.
+ */
+
+#ifndef __OTX2_IPSEC_FP_H__
+#define __OTX2_IPSEC_FP_H__
+
+#include <rte_crypto_sym.h>
+#include <rte_security.h>
+
+enum {
+ OTX2_IPSEC_FP_SA_DIRECTION_INBOUND = 0,
+ OTX2_IPSEC_FP_SA_DIRECTION_OUTBOUND = 1,
+};
+
+enum {
+ OTX2_IPSEC_FP_SA_IP_VERSION_4 = 0,
+ OTX2_IPSEC_FP_SA_IP_VERSION_6 = 1,
+};
+
+enum {
+ OTX2_IPSEC_FP_SA_MODE_TRANSPORT = 0,
+ OTX2_IPSEC_FP_SA_MODE_TUNNEL = 1,
+};
+
+enum {
+ OTX2_IPSEC_FP_SA_PROTOCOL_AH = 0,
+ OTX2_IPSEC_FP_SA_PROTOCOL_ESP = 1,
+};
+
+enum {
+ OTX2_IPSEC_FP_SA_AES_KEY_LEN_128 = 1,
+ OTX2_IPSEC_FP_SA_AES_KEY_LEN_192 = 2,
+ OTX2_IPSEC_FP_SA_AES_KEY_LEN_256 = 3,
+};
+
+enum {
+ OTX2_IPSEC_FP_SA_ENC_NULL = 0,
+ OTX2_IPSEC_FP_SA_ENC_DES_CBC = 1,
+ OTX2_IPSEC_FP_SA_ENC_3DES_CBC = 2,
+ OTX2_IPSEC_FP_SA_ENC_AES_CBC = 3,
+ OTX2_IPSEC_FP_SA_ENC_AES_CTR = 4,
+ OTX2_IPSEC_FP_SA_ENC_AES_GCM = 5,
+ OTX2_IPSEC_FP_SA_ENC_AES_CCM = 6,
+};
+
+enum {
+ OTX2_IPSEC_FP_SA_AUTH_NULL = 0,
+ OTX2_IPSEC_FP_SA_AUTH_MD5 = 1,
+ OTX2_IPSEC_FP_SA_AUTH_SHA1 = 2,
+ OTX2_IPSEC_FP_SA_AUTH_SHA2_224 = 3,
+ OTX2_IPSEC_FP_SA_AUTH_SHA2_256 = 4,
+ OTX2_IPSEC_FP_SA_AUTH_SHA2_384 = 5,
+ OTX2_IPSEC_FP_SA_AUTH_SHA2_512 = 6,
+ OTX2_IPSEC_FP_SA_AUTH_AES_GMAC = 7,
+ OTX2_IPSEC_FP_SA_AUTH_AES_XCBC_128 = 8,
+};
+
+enum {
+ OTX2_IPSEC_FP_SA_FRAG_POST = 0,
+ OTX2_IPSEC_FP_SA_FRAG_PRE = 1,
+};
+
+enum {
+ OTX2_IPSEC_FP_SA_ENCAP_NONE = 0,
+ OTX2_IPSEC_FP_SA_ENCAP_UDP = 1,
+};
+
+struct otx2_ipsec_fp_sa_ctl {
+ rte_be32_t spi : 32;
+ uint64_t exp_proto_inter_frag : 8;
+ uint64_t rsvd_42_40 : 3;
+ uint64_t esn_en : 1;
+ uint64_t rsvd_45_44 : 2;
+ uint64_t encap_type : 2;
+ uint64_t enc_type : 3;
+ uint64_t rsvd_48 : 1;
+ uint64_t auth_type : 4;
+ uint64_t valid : 1;
+ uint64_t direction : 1;
+ uint64_t outer_ip_ver : 1;
+ uint64_t inner_ip_ver : 1;
+ uint64_t ipsec_mode : 1;
+ uint64_t ipsec_proto : 1;
+ uint64_t aes_key_len : 2;
+};
+
+struct otx2_ipsec_fp_out_sa {
+ /* w0 */
+ struct otx2_ipsec_fp_sa_ctl ctl;
+
+ /* w1 */
+ uint8_t nonce[4];
+ uint16_t udp_src;
+ uint16_t udp_dst;
+
+ /* w2 */
+ uint32_t ip_src;
+ uint32_t ip_dst;
+
+ /* w3-w6 */
+ uint8_t cipher_key[32];
+
+ /* w7-w12 */
+ uint8_t hmac_key[48];
+};
+
+struct otx2_ipsec_fp_in_sa {
+ /* w0 */
+ struct otx2_ipsec_fp_sa_ctl ctl;
+
+ /* w1 */
+ uint8_t nonce[4]; /* Only for AES-GCM */
+ uint32_t unused;
+
+ /* w2 */
+ uint32_t esn_low;
+ uint32_t esn_hi;
+
+ /* w3-w6 */
+ uint8_t cipher_key[32];
+
+ /* w7-w12 */
+ uint8_t hmac_key[48];
+
+ RTE_STD_C11
+ union {
+ void *userdata;
+ uint64_t udata64;
+ };
+
+ uint64_t reserved1;
+ uint64_t reserved2;
+};
+
+static inline int
+ipsec_fp_xform_cipher_verify(struct rte_crypto_sym_xform *xform)
+{
+ if (xform->cipher.algo == RTE_CRYPTO_CIPHER_AES_CBC) {
+ switch (xform->cipher.key.length) {
+ case 16:
+ case 24:
+ case 32:
+ break;
+ default:
+ return -ENOTSUP;
+ }
+ return 0;
+ }
+
+ return -ENOTSUP;
+}
+
+static inline int
+ipsec_fp_xform_auth_verify(struct rte_crypto_sym_xform *xform)
+{
+ uint16_t keylen = xform->auth.key.length;
+
+ if (xform->auth.algo == RTE_CRYPTO_AUTH_SHA1_HMAC) {
+ if (keylen >= 20 && keylen <= 64)
+ return 0;
+ }
+
+ return -ENOTSUP;
+}
+
+static inline int
+ipsec_fp_xform_aead_verify(struct rte_security_ipsec_xform *ipsec,
+ struct rte_crypto_sym_xform *xform)
+{
+ if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS &&
+ xform->aead.op != RTE_CRYPTO_AEAD_OP_ENCRYPT)
+ return -EINVAL;
+
+ if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
+ xform->aead.op != RTE_CRYPTO_AEAD_OP_DECRYPT)
+ return -EINVAL;
+
+ if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) {
+ switch (xform->aead.key.length) {
+ case 16:
+ case 24:
+ case 32:
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+ }
+
+ return -ENOTSUP;
+}
+
+static inline int
+ipsec_fp_xform_verify(struct rte_security_ipsec_xform *ipsec,
+ struct rte_crypto_sym_xform *xform)
+{
+ struct rte_crypto_sym_xform *auth_xform, *cipher_xform;
+ int ret;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
+ return ipsec_fp_xform_aead_verify(ipsec, xform);
+
+ if (xform->next == NULL)
+ return -EINVAL;
+
+ if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
+ /* Ingress */
+ if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH ||
+ xform->next->type != RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return -EINVAL;
+ auth_xform = xform;
+ cipher_xform = xform->next;
+ } else {
+ /* Egress */
+ if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER ||
+ xform->next->type != RTE_CRYPTO_SYM_XFORM_AUTH)
+ return -EINVAL;
+ cipher_xform = xform;
+ auth_xform = xform->next;
+ }
+
+ ret = ipsec_fp_xform_cipher_verify(cipher_xform);
+ if (ret)
+ return ret;
+
+ ret = ipsec_fp_xform_auth_verify(auth_xform);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static inline int
+ipsec_fp_sa_ctl_set(struct rte_security_ipsec_xform *ipsec,
+ struct rte_crypto_sym_xform *xform,
+ struct otx2_ipsec_fp_sa_ctl *ctl)
+{
+ struct rte_crypto_sym_xform *cipher_xform, *auth_xform;
+ int aes_key_len;
+
+ if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
+ ctl->direction = OTX2_IPSEC_FP_SA_DIRECTION_OUTBOUND;
+ cipher_xform = xform;
+ auth_xform = xform->next;
+ } else if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
+ ctl->direction = OTX2_IPSEC_FP_SA_DIRECTION_INBOUND;
+ auth_xform = xform;
+ cipher_xform = xform->next;
+ } else {
+ return -EINVAL;
+ }
+
+ if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
+ if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
+ ctl->outer_ip_ver = OTX2_IPSEC_FP_SA_IP_VERSION_4;
+ else if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV6)
+ ctl->outer_ip_ver = OTX2_IPSEC_FP_SA_IP_VERSION_6;
+ else
+ return -EINVAL;
+ }
+
+ ctl->inner_ip_ver = OTX2_IPSEC_FP_SA_IP_VERSION_4;
+
+ if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT)
+ ctl->ipsec_mode = OTX2_IPSEC_FP_SA_MODE_TRANSPORT;
+ else if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
+ ctl->ipsec_mode = OTX2_IPSEC_FP_SA_MODE_TUNNEL;
+ else
+ return -EINVAL;
+
+ if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_AH)
+ ctl->ipsec_proto = OTX2_IPSEC_FP_SA_PROTOCOL_AH;
+ else if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP)
+ ctl->ipsec_proto = OTX2_IPSEC_FP_SA_PROTOCOL_ESP;
+ else
+ return -EINVAL;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) {
+ ctl->enc_type = OTX2_IPSEC_FP_SA_ENC_AES_GCM;
+ aes_key_len = xform->aead.key.length;
+ } else {
+ return -ENOTSUP;
+ }
+ } else if (cipher_xform->cipher.algo == RTE_CRYPTO_CIPHER_AES_CBC) {
+ ctl->enc_type = OTX2_IPSEC_FP_SA_ENC_AES_CBC;
+ aes_key_len = cipher_xform->cipher.key.length;
+ } else {
+ return -ENOTSUP;
+ }
+
+ switch (aes_key_len) {
+ case 16:
+ ctl->aes_key_len = OTX2_IPSEC_FP_SA_AES_KEY_LEN_128;
+ break;
+ case 24:
+ ctl->aes_key_len = OTX2_IPSEC_FP_SA_AES_KEY_LEN_192;
+ break;
+ case 32:
+ ctl->aes_key_len = OTX2_IPSEC_FP_SA_AES_KEY_LEN_256;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (xform->type != RTE_CRYPTO_SYM_XFORM_AEAD) {
+ switch (auth_xform->auth.algo) {
+ case RTE_CRYPTO_AUTH_NULL:
+ ctl->auth_type = OTX2_IPSEC_FP_SA_AUTH_NULL;
+ break;
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ ctl->auth_type = OTX2_IPSEC_FP_SA_AUTH_MD5;
+ break;
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ ctl->auth_type = OTX2_IPSEC_FP_SA_AUTH_SHA1;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ ctl->auth_type = OTX2_IPSEC_FP_SA_AUTH_SHA2_224;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ ctl->auth_type = OTX2_IPSEC_FP_SA_AUTH_SHA2_256;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ ctl->auth_type = OTX2_IPSEC_FP_SA_AUTH_SHA2_384;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ ctl->auth_type = OTX2_IPSEC_FP_SA_AUTH_SHA2_512;
+ break;
+ case RTE_CRYPTO_AUTH_AES_GMAC:
+ ctl->auth_type = OTX2_IPSEC_FP_SA_AUTH_AES_GMAC;
+ break;
+ case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+ ctl->auth_type = OTX2_IPSEC_FP_SA_AUTH_AES_XCBC_128;
+ break;
+ default:
+ return -ENOTSUP;
+ }
+ }
+
+ if (ipsec->options.esn == 1)
+ ctl->esn_en = 1;
+
+ ctl->spi = rte_cpu_to_be_32(ipsec->spi);
+ ctl->valid = 1;
+
+ return 0;
+}
+
+#endif /* __OTX2_IPSEC_FP_H__ */
diff --git a/src/spdk/dpdk/drivers/crypto/octeontx2/rte_pmd_octeontx2_crypto_version.map b/src/spdk/dpdk/drivers/crypto/octeontx2/rte_pmd_octeontx2_crypto_version.map
new file mode 100644
index 000000000..f9f17e4f6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/octeontx2/rte_pmd_octeontx2_crypto_version.map
@@ -0,0 +1,3 @@
+DPDK_20.0 {
+ local: *;
+};