author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-21 11:54:28 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-21 11:54:28 +0000
commit     e6918187568dbd01842d8d1d2c808ce16a894239 (patch)
tree       64f88b554b444a49f656b6c656111a145cbbaa28 /src/spdk/dpdk/drivers/crypto/nitrox
parent     Initial commit. (diff)
download   ceph-e6918187568dbd01842d8d1d2c808ce16a894239.tar.xz
           ceph-e6918187568dbd01842d8d1d2c808ce16a894239.zip

Adding upstream version 18.2.2. (tag: upstream/18.2.2)

Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/spdk/dpdk/drivers/crypto/nitrox')
-rw-r--r--  src/spdk/dpdk/drivers/crypto/nitrox/Makefile                    |  30
-rw-r--r--  src/spdk/dpdk/drivers/crypto/nitrox/meson.build                 |  18
-rw-r--r--  src/spdk/dpdk/drivers/crypto/nitrox/nitrox_csr.h                |  40
-rw-r--r--  src/spdk/dpdk/drivers/crypto/nitrox/nitrox_device.c             | 124
-rw-r--r--  src/spdk/dpdk/drivers/crypto/nitrox/nitrox_device.h             |  22
-rw-r--r--  src/spdk/dpdk/drivers/crypto/nitrox/nitrox_hal.c                | 236
-rw-r--r--  src/spdk/dpdk/drivers/crypto/nitrox/nitrox_hal.h                | 165
-rw-r--r--  src/spdk/dpdk/drivers/crypto/nitrox/nitrox_logs.c               |  14
-rw-r--r--  src/spdk/dpdk/drivers/crypto/nitrox/nitrox_logs.h               |  15
-rw-r--r--  src/spdk/dpdk/drivers/crypto/nitrox/nitrox_qp.c                 | 115
-rw-r--r--  src/spdk/dpdk/drivers/crypto/nitrox/nitrox_qp.h                 | 104
-rw-r--r--  src/spdk/dpdk/drivers/crypto/nitrox/nitrox_sym.c                | 733
-rw-r--r--  src/spdk/dpdk/drivers/crypto/nitrox/nitrox_sym.h                |  13
-rw-r--r--  src/spdk/dpdk/drivers/crypto/nitrox/nitrox_sym_capabilities.c   | 118
-rw-r--r--  src/spdk/dpdk/drivers/crypto/nitrox/nitrox_sym_capabilities.h   |  12
-rw-r--r--  src/spdk/dpdk/drivers/crypto/nitrox/nitrox_sym_ctx.h            |  84
-rw-r--r--  src/spdk/dpdk/drivers/crypto/nitrox/nitrox_sym_reqmgr.c         | 635
-rw-r--r--  src/spdk/dpdk/drivers/crypto/nitrox/nitrox_sym_reqmgr.h         |  23
-rw-r--r--  src/spdk/dpdk/drivers/crypto/nitrox/rte_pmd_nitrox_version.map  |   3
19 files changed, 2504 insertions, 0 deletions
diff --git a/src/spdk/dpdk/drivers/crypto/nitrox/Makefile b/src/spdk/dpdk/drivers/crypto/nitrox/Makefile
new file mode 100644
index 000000000..535121196
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/nitrox/Makefile
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(C) 2019 Marvell International Ltd.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_nitrox.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# versioning export map
+EXPORT_MAP := rte_pmd_nitrox_version.map
+
+# external library dependencies
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool
+LDLIBS += -lrte_pci -lrte_bus_pci
+LDLIBS += -lrte_cryptodev
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_NITROX) += nitrox_device.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_NITROX) += nitrox_hal.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_NITROX) += nitrox_logs.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_NITROX) += nitrox_sym.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_NITROX) += nitrox_sym_capabilities.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_NITROX) += nitrox_sym_reqmgr.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_NITROX) += nitrox_qp.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/crypto/nitrox/meson.build b/src/spdk/dpdk/drivers/crypto/nitrox/meson.build
new file mode 100644
index 000000000..9c0e89c2c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/nitrox/meson.build
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(C) 2019 Marvell International Ltd.
+
+if not is_linux
+ build = false
+ reason = 'only supported on Linux'
+endif
+
+deps += ['bus_pci']
+sources = files(
+ 'nitrox_device.c',
+ 'nitrox_hal.c',
+ 'nitrox_logs.c',
+ 'nitrox_sym.c',
+ 'nitrox_sym_capabilities.c',
+ 'nitrox_sym_reqmgr.c',
+ 'nitrox_qp.c'
+ )
diff --git a/src/spdk/dpdk/drivers/crypto/nitrox/nitrox_csr.h b/src/spdk/dpdk/drivers/crypto/nitrox/nitrox_csr.h
new file mode 100644
index 000000000..de7a3c671
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/nitrox/nitrox_csr.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef _NITROX_CSR_H_
+#define _NITROX_CSR_H_
+
+#include <rte_common.h>
+#include <rte_io.h>
+
+#define CSR_DELAY 30
+#define NITROX_CSR_ADDR(bar_addr, offset) (bar_addr + (offset))
+
+/* NPS packet registers */
+#define NPS_PKT_IN_INSTR_CTLX(_i) (0x10060UL + ((_i) * 0x40000UL))
+#define NPS_PKT_IN_INSTR_BADDRX(_i) (0x10068UL + ((_i) * 0x40000UL))
+#define NPS_PKT_IN_INSTR_RSIZEX(_i) (0x10070UL + ((_i) * 0x40000UL))
+#define NPS_PKT_IN_DONE_CNTSX(_i) (0x10080UL + ((_i) * 0x40000UL))
+#define NPS_PKT_IN_INSTR_BAOFF_DBELLX(_i) (0x10078UL + ((_i) * 0x40000UL))
+#define NPS_PKT_IN_INT_LEVELSX(_i) (0x10088UL + ((_i) * 0x40000UL))
+#define NPS_PKT_SLC_CTLX(_i) (0x10000UL + ((_i) * 0x40000UL))
+#define NPS_PKT_SLC_CNTSX(_i) (0x10008UL + ((_i) * 0x40000UL))
+#define NPS_PKT_SLC_INT_LEVELSX(_i) (0x10010UL + ((_i) * 0x40000UL))
+
+/* AQM Virtual Function Registers */
+#define AQMQ_QSZX(_i) (0x20008UL + ((_i) * 0x40000UL))
+
+static inline uint64_t
+nitrox_read_csr(uint8_t *bar_addr, uint64_t offset)
+{
+ return rte_read64(bar_addr + offset);
+}
+
+static inline void
+nitrox_write_csr(uint8_t *bar_addr, uint64_t offset, uint64_t value)
+{
+ rte_write64(value, (bar_addr + offset));
+}
+
+#endif /* _NITROX_CSR_H_ */
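
The two helpers above are the only access path to the device CSRs; the HAL
that follows always drives them as a read-modify-write of a bit-field union.
A minimal sketch of that pattern (the union type is defined in nitrox_hal.h
below; bar_addr is assumed to be the mapped BAR 0 of the VF):

	union nps_pkt_in_instr_ctl ctl;
	uint64_t off = NPS_PKT_IN_INSTR_CTLX(0);   /* ring 0 control CSR */

	ctl.u64 = nitrox_read_csr(bar_addr, off);  /* fetch current value */
	ctl.s.enb = 0;                             /* clear the enable bit */
	nitrox_write_csr(bar_addr, off, ctl.u64);  /* write it back */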
diff --git a/src/spdk/dpdk/drivers/crypto/nitrox/nitrox_device.c b/src/spdk/dpdk/drivers/crypto/nitrox/nitrox_device.c
new file mode 100644
index 000000000..5b319dd68
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/nitrox/nitrox_device.c
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <rte_malloc.h>
+
+#include "nitrox_device.h"
+#include "nitrox_hal.h"
+#include "nitrox_sym.h"
+
+#define PCI_VENDOR_ID_CAVIUM 0x177d
+#define NITROX_V_PCI_VF_DEV_ID 0x13
+
+TAILQ_HEAD(ndev_list, nitrox_device);
+static struct ndev_list ndev_list = TAILQ_HEAD_INITIALIZER(ndev_list);
+
+static struct nitrox_device *
+ndev_allocate(struct rte_pci_device *pdev)
+{
+ struct nitrox_device *ndev;
+
+ ndev = rte_zmalloc_socket("nitrox device", sizeof(*ndev),
+ RTE_CACHE_LINE_SIZE,
+ pdev->device.numa_node);
+ if (!ndev)
+ return NULL;
+
+ TAILQ_INSERT_TAIL(&ndev_list, ndev, next);
+ return ndev;
+}
+
+static void
+ndev_init(struct nitrox_device *ndev, struct rte_pci_device *pdev)
+{
+ enum nitrox_vf_mode vf_mode;
+
+ ndev->pdev = pdev;
+ ndev->bar_addr = pdev->mem_resource[0].addr;
+ vf_mode = vf_get_vf_config_mode(ndev->bar_addr);
+ ndev->nr_queues = vf_config_mode_to_nr_queues(vf_mode);
+}
+
+static struct nitrox_device *
+find_ndev(struct rte_pci_device *pdev)
+{
+ struct nitrox_device *ndev;
+
+ TAILQ_FOREACH(ndev, &ndev_list, next)
+ if (ndev->pdev == pdev)
+ return ndev;
+
+ return NULL;
+}
+
+static void
+ndev_release(struct nitrox_device *ndev)
+{
+ if (!ndev)
+ return;
+
+ TAILQ_REMOVE(&ndev_list, ndev, next);
+ rte_free(ndev);
+}
+
+static int
+nitrox_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pdev)
+{
+ struct nitrox_device *ndev;
+ int err;
+
+ /* Nitrox CSR space */
+ if (!pdev->mem_resource[0].addr)
+ return -EINVAL;
+
+ ndev = ndev_allocate(pdev);
+ if (!ndev)
+ return -ENOMEM;
+
+ ndev_init(ndev, pdev);
+ err = nitrox_sym_pmd_create(ndev);
+ if (err) {
+ ndev_release(ndev);
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+nitrox_pci_remove(struct rte_pci_device *pdev)
+{
+ struct nitrox_device *ndev;
+ int err;
+
+ ndev = find_ndev(pdev);
+ if (!ndev)
+ return -ENODEV;
+
+ err = nitrox_sym_pmd_destroy(ndev);
+ if (err)
+ return err;
+
+ ndev_release(ndev);
+ return 0;
+}
+
+static struct rte_pci_id pci_id_nitrox_map[] = {
+ {
+ /* Nitrox 5 VF */
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, NITROX_V_PCI_VF_DEV_ID)
+ },
+ {.device_id = 0},
+};
+
+static struct rte_pci_driver nitrox_pmd = {
+ .id_table = pci_id_nitrox_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = nitrox_pci_probe,
+ .remove = nitrox_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(nitrox, nitrox_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(nitrox, pci_id_nitrox_map);
diff --git a/src/spdk/dpdk/drivers/crypto/nitrox/nitrox_device.h b/src/spdk/dpdk/drivers/crypto/nitrox/nitrox_device.h
new file mode 100644
index 000000000..6b8095f42
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/nitrox/nitrox_device.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef _NITROX_DEVICE_H_
+#define _NITROX_DEVICE_H_
+
+#include <rte_bus_pci.h>
+#include <rte_cryptodev.h>
+
+struct nitrox_sym_device;
+
+struct nitrox_device {
+ TAILQ_ENTRY(nitrox_device) next;
+ struct rte_pci_device *pdev;
+ uint8_t *bar_addr;
+ struct nitrox_sym_device *sym_dev;
+ struct rte_device rte_sym_dev;
+ uint16_t nr_queues;
+};
+
+#endif /* _NITROX_DEVICE_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/nitrox/nitrox_hal.c b/src/spdk/dpdk/drivers/crypto/nitrox/nitrox_hal.c
new file mode 100644
index 000000000..433f3adb2
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/nitrox/nitrox_hal.c
@@ -0,0 +1,236 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_memory.h>
+#include <rte_byteorder.h>
+
+#include "nitrox_hal.h"
+#include "nitrox_csr.h"
+
+#define MAX_VF_QUEUES 8
+#define MAX_PF_QUEUES 64
+#define NITROX_TIMER_THOLD 0x3FFFFF
+#define NITROX_COUNT_THOLD 0xFFFFFFFF
+
+void
+nps_pkt_input_ring_disable(uint8_t *bar_addr, uint16_t ring)
+{
+ union nps_pkt_in_instr_ctl pkt_in_instr_ctl;
+ uint64_t reg_addr;
+ int max_retries = 5;
+
+ reg_addr = NPS_PKT_IN_INSTR_CTLX(ring);
+ pkt_in_instr_ctl.u64 = nitrox_read_csr(bar_addr, reg_addr);
+ pkt_in_instr_ctl.s.enb = 0;
+ nitrox_write_csr(bar_addr, reg_addr, pkt_in_instr_ctl.u64);
+ rte_delay_us_block(100);
+
+ /* wait for enable bit to be cleared */
+ pkt_in_instr_ctl.u64 = nitrox_read_csr(bar_addr, reg_addr);
+ while (pkt_in_instr_ctl.s.enb && max_retries--) {
+ rte_delay_ms(10);
+ pkt_in_instr_ctl.u64 = nitrox_read_csr(bar_addr, reg_addr);
+ }
+}
+
+void
+nps_pkt_solicited_port_disable(uint8_t *bar_addr, uint16_t port)
+{
+ union nps_pkt_slc_ctl pkt_slc_ctl;
+ uint64_t reg_addr;
+ int max_retries = 5;
+
+ /* clear enable bit */
+ reg_addr = NPS_PKT_SLC_CTLX(port);
+ pkt_slc_ctl.u64 = nitrox_read_csr(bar_addr, reg_addr);
+ pkt_slc_ctl.s.enb = 0;
+ nitrox_write_csr(bar_addr, reg_addr, pkt_slc_ctl.u64);
+ rte_delay_us_block(100);
+
+ pkt_slc_ctl.u64 = nitrox_read_csr(bar_addr, reg_addr);
+ while (pkt_slc_ctl.s.enb && max_retries--) {
+ rte_delay_ms(10);
+ pkt_slc_ctl.u64 = nitrox_read_csr(bar_addr, reg_addr);
+ }
+}
+
+void
+setup_nps_pkt_input_ring(uint8_t *bar_addr, uint16_t ring, uint32_t rsize,
+ phys_addr_t raddr)
+{
+ union nps_pkt_in_instr_ctl pkt_in_instr_ctl;
+ union nps_pkt_in_instr_rsize pkt_in_instr_rsize;
+ union nps_pkt_in_instr_baoff_dbell pkt_in_instr_baoff_dbell;
+ union nps_pkt_in_done_cnts pkt_in_done_cnts;
+ uint64_t base_addr, reg_addr;
+ int max_retries = 5;
+
+ nps_pkt_input_ring_disable(bar_addr, ring);
+
+ /* write base address */
+ reg_addr = NPS_PKT_IN_INSTR_BADDRX(ring);
+ base_addr = raddr;
+ nitrox_write_csr(bar_addr, reg_addr, base_addr);
+ rte_delay_us_block(CSR_DELAY);
+
+ /* write ring size */
+ reg_addr = NPS_PKT_IN_INSTR_RSIZEX(ring);
+ pkt_in_instr_rsize.u64 = 0;
+ pkt_in_instr_rsize.s.rsize = rsize;
+ nitrox_write_csr(bar_addr, reg_addr, pkt_in_instr_rsize.u64);
+ rte_delay_us_block(CSR_DELAY);
+
+	/* clear doorbell */
+ reg_addr = NPS_PKT_IN_INSTR_BAOFF_DBELLX(ring);
+ pkt_in_instr_baoff_dbell.u64 = 0;
+ pkt_in_instr_baoff_dbell.s.dbell = 0xFFFFFFFF;
+ nitrox_write_csr(bar_addr, reg_addr, pkt_in_instr_baoff_dbell.u64);
+ rte_delay_us_block(CSR_DELAY);
+
+ /* clear done count */
+ reg_addr = NPS_PKT_IN_DONE_CNTSX(ring);
+ pkt_in_done_cnts.u64 = nitrox_read_csr(bar_addr, reg_addr);
+ nitrox_write_csr(bar_addr, reg_addr, pkt_in_done_cnts.u64);
+ rte_delay_us_block(CSR_DELAY);
+
+ /* Setup PKT IN RING Interrupt Threshold */
+ reg_addr = NPS_PKT_IN_INT_LEVELSX(ring);
+ nitrox_write_csr(bar_addr, reg_addr, 0xFFFFFFFF);
+ rte_delay_us_block(CSR_DELAY);
+
+ /* enable ring */
+ reg_addr = NPS_PKT_IN_INSTR_CTLX(ring);
+ pkt_in_instr_ctl.u64 = 0;
+ pkt_in_instr_ctl.u64 = nitrox_read_csr(bar_addr, reg_addr);
+ pkt_in_instr_ctl.s.is64b = 1;
+ pkt_in_instr_ctl.s.enb = 1;
+ nitrox_write_csr(bar_addr, reg_addr, pkt_in_instr_ctl.u64);
+ rte_delay_us_block(100);
+
+ pkt_in_instr_ctl.u64 = 0;
+ pkt_in_instr_ctl.u64 = nitrox_read_csr(bar_addr, reg_addr);
+ /* wait for ring to be enabled */
+ while (!pkt_in_instr_ctl.s.enb && max_retries--) {
+ rte_delay_ms(10);
+ pkt_in_instr_ctl.u64 = nitrox_read_csr(bar_addr, reg_addr);
+ }
+}
+
+void
+setup_nps_pkt_solicit_output_port(uint8_t *bar_addr, uint16_t port)
+{
+ union nps_pkt_slc_ctl pkt_slc_ctl;
+ union nps_pkt_slc_cnts pkt_slc_cnts;
+ union nps_pkt_slc_int_levels pkt_slc_int_levels;
+ uint64_t reg_addr;
+ int max_retries = 5;
+
+ nps_pkt_solicited_port_disable(bar_addr, port);
+
+ /* clear pkt counts */
+ reg_addr = NPS_PKT_SLC_CNTSX(port);
+ pkt_slc_cnts.u64 = nitrox_read_csr(bar_addr, reg_addr);
+ nitrox_write_csr(bar_addr, reg_addr, pkt_slc_cnts.u64);
+ rte_delay_us_block(CSR_DELAY);
+
+ /* slc interrupt levels */
+ reg_addr = NPS_PKT_SLC_INT_LEVELSX(port);
+ pkt_slc_int_levels.u64 = 0;
+ pkt_slc_int_levels.s.bmode = 0;
+ pkt_slc_int_levels.s.timet = NITROX_TIMER_THOLD;
+
+ if (NITROX_COUNT_THOLD > 0)
+ pkt_slc_int_levels.s.cnt = NITROX_COUNT_THOLD - 1;
+
+ nitrox_write_csr(bar_addr, reg_addr, pkt_slc_int_levels.u64);
+ rte_delay_us_block(CSR_DELAY);
+
+ /* enable ring */
+ reg_addr = NPS_PKT_SLC_CTLX(port);
+ pkt_slc_ctl.u64 = nitrox_read_csr(bar_addr, reg_addr);
+ pkt_slc_ctl.s.rh = 1;
+ pkt_slc_ctl.s.z = 1;
+ pkt_slc_ctl.s.enb = 1;
+ nitrox_write_csr(bar_addr, reg_addr, pkt_slc_ctl.u64);
+ rte_delay_us_block(100);
+
+ pkt_slc_ctl.u64 = nitrox_read_csr(bar_addr, reg_addr);
+ while (!pkt_slc_ctl.s.enb && max_retries--) {
+ rte_delay_ms(10);
+ pkt_slc_ctl.u64 = nitrox_read_csr(bar_addr, reg_addr);
+ }
+}
+
+int
+vf_get_vf_config_mode(uint8_t *bar_addr)
+{
+ union aqmq_qsz aqmq_qsz;
+ uint64_t reg_addr;
+ int q, vf_mode;
+
+ aqmq_qsz.u64 = 0;
+ aqmq_qsz.s.host_queue_size = 0xDEADBEEF;
+ reg_addr = AQMQ_QSZX(0);
+ nitrox_write_csr(bar_addr, reg_addr, aqmq_qsz.u64);
+ rte_delay_us_block(CSR_DELAY);
+
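+	/*
+	 * Scan the remaining queue slots for the sentinel written above;
+	 * the index at which it reappears reveals how many rings this VF
+	 * owns, which maps to the VF mode below.
+	 */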
+ aqmq_qsz.u64 = 0;
+ for (q = 1; q < MAX_VF_QUEUES; q++) {
+ reg_addr = AQMQ_QSZX(q);
+ aqmq_qsz.u64 = nitrox_read_csr(bar_addr, reg_addr);
+ if (aqmq_qsz.s.host_queue_size == 0xDEADBEEF)
+ break;
+ }
+
+ switch (q) {
+ case 1:
+ vf_mode = NITROX_MODE_VF128;
+ break;
+ case 2:
+ vf_mode = NITROX_MODE_VF64;
+ break;
+ case 4:
+ vf_mode = NITROX_MODE_VF32;
+ break;
+ case 8:
+ vf_mode = NITROX_MODE_VF16;
+ break;
+ default:
+ vf_mode = 0;
+ break;
+ }
+
+ return vf_mode;
+}
+
+int
+vf_config_mode_to_nr_queues(enum nitrox_vf_mode vf_mode)
+{
+ int nr_queues;
+
+ switch (vf_mode) {
+ case NITROX_MODE_PF:
+ nr_queues = MAX_PF_QUEUES;
+ break;
+ case NITROX_MODE_VF16:
+ nr_queues = 8;
+ break;
+ case NITROX_MODE_VF32:
+ nr_queues = 4;
+ break;
+ case NITROX_MODE_VF64:
+ nr_queues = 2;
+ break;
+ case NITROX_MODE_VF128:
+ nr_queues = 1;
+ break;
+ default:
+ nr_queues = 0;
+ break;
+ }
+
+ return nr_queues;
+}
diff --git a/src/spdk/dpdk/drivers/crypto/nitrox/nitrox_hal.h b/src/spdk/dpdk/drivers/crypto/nitrox/nitrox_hal.h
new file mode 100644
index 000000000..dcfbd11d8
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/nitrox/nitrox_hal.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef _NITROX_HAL_H_
+#define _NITROX_HAL_H_
+
+#include <rte_cycles.h>
+#include <rte_byteorder.h>
+
+#include "nitrox_csr.h"
+
+union nps_pkt_slc_cnts {
+ uint64_t u64;
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint64_t slc_int : 1;
+ uint64_t uns_int : 1;
+ uint64_t in_int : 1;
+ uint64_t mbox_int : 1;
+ uint64_t resend : 1;
+ uint64_t raz : 5;
+ uint64_t timer : 22;
+ uint64_t cnt : 32;
+#else
+ uint64_t cnt : 32;
+ uint64_t timer : 22;
+ uint64_t raz : 5;
+ uint64_t resend : 1;
+ uint64_t mbox_int : 1;
+ uint64_t in_int : 1;
+ uint64_t uns_int : 1;
+ uint64_t slc_int : 1;
+#endif
+ } s;
+};
+
+union nps_pkt_slc_int_levels {
+ uint64_t u64;
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint64_t bmode : 1;
+ uint64_t raz : 9;
+ uint64_t timet : 22;
+ uint64_t cnt : 32;
+#else
+ uint64_t cnt : 32;
+ uint64_t timet : 22;
+ uint64_t raz : 9;
+ uint64_t bmode : 1;
+#endif
+ } s;
+};
+
+union nps_pkt_slc_ctl {
+ uint64_t u64;
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint64_t raz : 61;
+ uint64_t rh : 1;
+ uint64_t z : 1;
+ uint64_t enb : 1;
+#else
+ uint64_t enb : 1;
+ uint64_t z : 1;
+ uint64_t rh : 1;
+ uint64_t raz : 61;
+#endif
+ } s;
+};
+
+union nps_pkt_in_instr_ctl {
+ uint64_t u64;
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint64_t raz : 62;
+ uint64_t is64b : 1;
+ uint64_t enb : 1;
+#else
+ uint64_t enb : 1;
+ uint64_t is64b : 1;
+ uint64_t raz : 62;
+#endif
+ } s;
+};
+
+union nps_pkt_in_instr_rsize {
+ uint64_t u64;
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint64_t raz : 32;
+ uint64_t rsize : 32;
+#else
+ uint64_t rsize : 32;
+ uint64_t raz : 32;
+#endif
+ } s;
+};
+
+union nps_pkt_in_instr_baoff_dbell {
+ uint64_t u64;
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint64_t aoff : 32;
+ uint64_t dbell : 32;
+#else
+ uint64_t dbell : 32;
+ uint64_t aoff : 32;
+#endif
+ } s;
+};
+
+union nps_pkt_in_done_cnts {
+ uint64_t u64;
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint64_t slc_int : 1;
+ uint64_t uns_int : 1;
+ uint64_t in_int : 1;
+ uint64_t mbox_int : 1;
+ uint64_t resend : 1;
+ uint64_t raz : 27;
+ uint64_t cnt : 32;
+#else
+ uint64_t cnt : 32;
+ uint64_t raz : 27;
+ uint64_t resend : 1;
+ uint64_t mbox_int : 1;
+ uint64_t in_int : 1;
+ uint64_t uns_int : 1;
+ uint64_t slc_int : 1;
+#endif
+ } s;
+};
+
+union aqmq_qsz {
+ uint64_t u64;
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint64_t raz : 32;
+ uint64_t host_queue_size : 32;
+#else
+ uint64_t host_queue_size : 32;
+ uint64_t raz : 32;
+#endif
+ } s;
+};
+
+enum nitrox_vf_mode {
+ NITROX_MODE_PF = 0x0,
+ NITROX_MODE_VF16 = 0x1,
+ NITROX_MODE_VF32 = 0x2,
+ NITROX_MODE_VF64 = 0x3,
+ NITROX_MODE_VF128 = 0x4,
+};
+
+int vf_get_vf_config_mode(uint8_t *bar_addr);
+int vf_config_mode_to_nr_queues(enum nitrox_vf_mode vf_mode);
+void setup_nps_pkt_input_ring(uint8_t *bar_addr, uint16_t ring, uint32_t rsize,
+ phys_addr_t raddr);
+void setup_nps_pkt_solicit_output_port(uint8_t *bar_addr, uint16_t port);
+void nps_pkt_input_ring_disable(uint8_t *bar_addr, uint16_t ring);
+void nps_pkt_solicited_port_disable(uint8_t *bar_addr, uint16_t port);
+
+#endif /* _NITROX_HAL_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/nitrox/nitrox_logs.c b/src/spdk/dpdk/drivers/crypto/nitrox/nitrox_logs.c
new file mode 100644
index 000000000..007056cb4
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/nitrox/nitrox_logs.c
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <rte_log.h>
+
+int nitrox_logtype;
+
+RTE_INIT(nitrox_init_log)
+{
+ nitrox_logtype = rte_log_register("pmd.crypto.nitrox");
+ if (nitrox_logtype >= 0)
+ rte_log_set_level(nitrox_logtype, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/crypto/nitrox/nitrox_logs.h b/src/spdk/dpdk/drivers/crypto/nitrox/nitrox_logs.h
new file mode 100644
index 000000000..50e52f396
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/nitrox/nitrox_logs.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef _NITROX_LOGS_H_
+#define _NITROX_LOGS_H_
+
+#define LOG_PREFIX "NITROX: "
+#define NITROX_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, nitrox_logtype, \
+ LOG_PREFIX "%s:%d " fmt, __func__, __LINE__, ## args)
+
+extern int nitrox_logtype;
+
+#endif /* _NITROX_LOGS_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/nitrox/nitrox_qp.c b/src/spdk/dpdk/drivers/crypto/nitrox/nitrox_qp.c
new file mode 100644
index 000000000..5e85ccbd5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/nitrox/nitrox_qp.c
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <rte_cryptodev.h>
+#include <rte_malloc.h>
+
+#include "nitrox_qp.h"
+#include "nitrox_hal.h"
+#include "nitrox_logs.h"
+
+#define MAX_CMD_QLEN 16384
+#define CMDQ_PKT_IN_ALIGN 16
+
+static int
+nitrox_setup_cmdq(struct nitrox_qp *qp, uint8_t *bar_addr,
+ const char *dev_name, uint8_t instr_size, int socket_id)
+{
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ const struct rte_memzone *mz;
+ size_t cmdq_size = qp->count * instr_size;
+ uint64_t offset;
+
+ snprintf(mz_name, sizeof(mz_name), "%s_cmdq_%d", dev_name, qp->qno);
+ mz = rte_memzone_reserve_aligned(mz_name, cmdq_size, socket_id,
+ RTE_MEMZONE_SIZE_HINT_ONLY |
+ RTE_MEMZONE_256MB,
+ CMDQ_PKT_IN_ALIGN);
+ if (!mz) {
+ NITROX_LOG(ERR, "cmdq memzone reserve failed for %s queue\n",
+ mz_name);
+ return -ENOMEM;
+ }
+
+ qp->cmdq.mz = mz;
+ offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(qp->qno);
+ qp->cmdq.dbell_csr_addr = NITROX_CSR_ADDR(bar_addr, offset);
+ qp->cmdq.ring = mz->addr;
+ qp->cmdq.instr_size = instr_size;
+ setup_nps_pkt_input_ring(bar_addr, qp->qno, qp->count, mz->iova);
+ setup_nps_pkt_solicit_output_port(bar_addr, qp->qno);
+
+ return 0;
+}
+
+static int
+nitrox_setup_ridq(struct nitrox_qp *qp, int socket_id)
+{
+ size_t ridq_size = qp->count * sizeof(*qp->ridq);
+
+ qp->ridq = rte_zmalloc_socket("nitrox ridq", ridq_size,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!qp->ridq) {
+ NITROX_LOG(ERR, "Failed to create rid queue\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int
+nitrox_release_cmdq(struct nitrox_qp *qp, uint8_t *bar_addr)
+{
+ nps_pkt_solicited_port_disable(bar_addr, qp->qno);
+ nps_pkt_input_ring_disable(bar_addr, qp->qno);
+ return rte_memzone_free(qp->cmdq.mz);
+}
+
+int
+nitrox_qp_setup(struct nitrox_qp *qp, uint8_t *bar_addr, const char *dev_name,
+ uint32_t nb_descriptors, uint8_t instr_size, int socket_id)
+{
+ int err;
+ uint32_t count;
+
+ count = rte_align32pow2(nb_descriptors);
+ if (count > MAX_CMD_QLEN) {
+ NITROX_LOG(ERR, "%s: Number of descriptors too big %d,"
+ " greater than max queue length %d\n",
+ dev_name, count,
+ MAX_CMD_QLEN);
+ return -EINVAL;
+ }
+
+ qp->count = count;
+ qp->head = qp->tail = 0;
+ rte_atomic16_init(&qp->pending_count);
+ err = nitrox_setup_cmdq(qp, bar_addr, dev_name, instr_size, socket_id);
+ if (err)
+ return err;
+
+ err = nitrox_setup_ridq(qp, socket_id);
+ if (err)
+ goto ridq_err;
+
+ return 0;
+
+ridq_err:
+ nitrox_release_cmdq(qp, bar_addr);
+ return err;
+}
+
+static void
+nitrox_release_ridq(struct nitrox_qp *qp)
+{
+ rte_free(qp->ridq);
+}
+
+int
+nitrox_qp_release(struct nitrox_qp *qp, uint8_t *bar_addr)
+{
+ nitrox_release_ridq(qp);
+ return nitrox_release_cmdq(qp, bar_addr);
+}
diff --git a/src/spdk/dpdk/drivers/crypto/nitrox/nitrox_qp.h b/src/spdk/dpdk/drivers/crypto/nitrox/nitrox_qp.h
new file mode 100644
index 000000000..d42d53f92
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/nitrox/nitrox_qp.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef _NITROX_QP_H_
+#define _NITROX_QP_H_
+
+#include <stdbool.h>
+
+#include <rte_io.h>
+
+struct nitrox_softreq;
+
+struct command_queue {
+ const struct rte_memzone *mz;
+ uint8_t *dbell_csr_addr;
+ uint8_t *ring;
+ uint8_t instr_size;
+};
+
+struct rid {
+ struct nitrox_softreq *sr;
+};
+
+struct nitrox_qp {
+ struct command_queue cmdq;
+ struct rid *ridq;
+ uint32_t count;
+ uint32_t head;
+ uint32_t tail;
+ struct rte_mempool *sr_mp;
+ struct rte_cryptodev_stats stats;
+ uint16_t qno;
+ rte_atomic16_t pending_count;
+};
+
+static inline uint16_t
+nitrox_qp_free_count(struct nitrox_qp *qp)
+{
+ uint16_t pending_count = rte_atomic16_read(&qp->pending_count);
+
+ RTE_ASSERT(qp->count >= pending_count);
+ return (qp->count - pending_count);
+}
+
+static inline bool
+nitrox_qp_is_empty(struct nitrox_qp *qp)
+{
+ return (rte_atomic16_read(&qp->pending_count) == 0);
+}
+
+static inline uint16_t
+nitrox_qp_used_count(struct nitrox_qp *qp)
+{
+ return rte_atomic16_read(&qp->pending_count);
+}
+
+static inline struct nitrox_softreq *
+nitrox_qp_get_softreq(struct nitrox_qp *qp)
+{
+ uint32_t tail = qp->tail % qp->count;
+
+ rte_smp_rmb();
+ return qp->ridq[tail].sr;
+}
+
+static inline void
+nitrox_ring_dbell(struct nitrox_qp *qp, uint16_t cnt)
+{
+ struct command_queue *cmdq = &qp->cmdq;
+
+ if (!cnt)
+ return;
+
+ rte_io_wmb();
+ rte_write64(cnt, cmdq->dbell_csr_addr);
+}
+
+static inline void
+nitrox_qp_enqueue(struct nitrox_qp *qp, void *instr, struct nitrox_softreq *sr)
+{
+ uint32_t head = qp->head % qp->count;
+
+ qp->head++;
+ memcpy(&qp->cmdq.ring[head * qp->cmdq.instr_size],
+ instr, qp->cmdq.instr_size);
+ qp->ridq[head].sr = sr;
+ rte_smp_wmb();
+ rte_atomic16_inc(&qp->pending_count);
+}
+
+static inline void
+nitrox_qp_dequeue(struct nitrox_qp *qp)
+{
+ qp->tail++;
+ rte_atomic16_dec(&qp->pending_count);
+}
+
+int nitrox_qp_setup(struct nitrox_qp *qp, uint8_t *bar_addr,
+ const char *dev_name, uint32_t nb_descriptors,
+ uint8_t inst_size, int socket_id);
+int nitrox_qp_release(struct nitrox_qp *qp, uint8_t *bar_addr);
+
+#endif /* _NITROX_QP_H_ */
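
The inline helpers above form a single-producer command ring: enqueue copies
the fixed-size instruction, publishes the softreq pointer behind a write
barrier and bumps the pending count; ringing the doorbell and reaping are
separate steps. A minimal sketch of one submit/reap round trip (how
completion of the softreq itself is detected is elided; see nitrox_sym.c
below):

	nitrox_qp_enqueue(qp, instr, sr);   /* copy instr, publish sr, pending++ */
	nitrox_ring_dbell(qp, 1);           /* io wmb, then doorbell CSR write */

	if (!nitrox_qp_is_empty(qp)) {
		sr = nitrox_qp_get_softreq(qp);  /* smp rmb, oldest in-flight req */
		/* ...once the device has completed sr... */
		nitrox_qp_dequeue(qp);           /* tail++, pending-- */
	}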
diff --git a/src/spdk/dpdk/drivers/crypto/nitrox/nitrox_sym.c b/src/spdk/dpdk/drivers/crypto/nitrox/nitrox_sym.c
new file mode 100644
index 000000000..fad4a7a48
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/nitrox/nitrox_sym.c
@@ -0,0 +1,733 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <stdbool.h>
+
+#include <rte_cryptodev_pmd.h>
+#include <rte_crypto.h>
+
+#include "nitrox_sym.h"
+#include "nitrox_device.h"
+#include "nitrox_sym_capabilities.h"
+#include "nitrox_qp.h"
+#include "nitrox_sym_reqmgr.h"
+#include "nitrox_sym_ctx.h"
+#include "nitrox_logs.h"
+
+#define CRYPTODEV_NAME_NITROX_PMD crypto_nitrox_sym
+#define MC_MAC_MISMATCH_ERR_CODE 0x4c
+#define NPS_PKT_IN_INSTR_SIZE 64
+#define IV_FROM_DPTR 1
+#define FLEXI_CRYPTO_ENCRYPT_HMAC 0x33
+#define AES_KEYSIZE_128 16
+#define AES_KEYSIZE_192 24
+#define AES_KEYSIZE_256 32
+#define MAX_IV_LEN 16
+
+struct nitrox_sym_device {
+ struct rte_cryptodev *cdev;
+ struct nitrox_device *ndev;
+};
+
+/* Cipher opcodes */
+enum flexi_cipher {
+ CIPHER_NULL = 0,
+ CIPHER_3DES_CBC,
+ CIPHER_3DES_ECB,
+ CIPHER_AES_CBC,
+ CIPHER_AES_ECB,
+ CIPHER_AES_CFB,
+ CIPHER_AES_CTR,
+ CIPHER_AES_GCM,
+ CIPHER_AES_XTS,
+ CIPHER_AES_CCM,
+ CIPHER_AES_CBC_CTS,
+ CIPHER_AES_ECB_CTS,
+ CIPHER_INVALID
+};
+
+/* Auth opcodes */
+enum flexi_auth {
+ AUTH_NULL = 0,
+ AUTH_MD5,
+ AUTH_SHA1,
+ AUTH_SHA2_SHA224,
+ AUTH_SHA2_SHA256,
+ AUTH_SHA2_SHA384,
+ AUTH_SHA2_SHA512,
+ AUTH_GMAC,
+ AUTH_INVALID
+};
+
+uint8_t nitrox_sym_drv_id;
+static const char nitrox_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_NITROX_PMD);
+static const struct rte_driver nitrox_rte_sym_drv = {
+ .name = nitrox_sym_drv_name,
+ .alias = nitrox_sym_drv_name
+};
+
+static int nitrox_sym_dev_qp_release(struct rte_cryptodev *cdev,
+ uint16_t qp_id);
+
+static int
+nitrox_sym_dev_config(struct rte_cryptodev *cdev,
+ struct rte_cryptodev_config *config)
+{
+ struct nitrox_sym_device *sym_dev = cdev->data->dev_private;
+ struct nitrox_device *ndev = sym_dev->ndev;
+
+ if (config->nb_queue_pairs > ndev->nr_queues) {
+ NITROX_LOG(ERR, "Invalid queue pairs, max supported %d\n",
+ ndev->nr_queues);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+nitrox_sym_dev_start(struct rte_cryptodev *cdev)
+{
+ /* SE cores initialization is done in PF */
+ RTE_SET_USED(cdev);
+ return 0;
+}
+
+static void
+nitrox_sym_dev_stop(struct rte_cryptodev *cdev)
+{
+ /* SE cores cleanup is done in PF */
+ RTE_SET_USED(cdev);
+}
+
+static int
+nitrox_sym_dev_close(struct rte_cryptodev *cdev)
+{
+ int i, ret;
+
+ for (i = 0; i < cdev->data->nb_queue_pairs; i++) {
+ ret = nitrox_sym_dev_qp_release(cdev, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void
+nitrox_sym_dev_info_get(struct rte_cryptodev *cdev,
+ struct rte_cryptodev_info *info)
+{
+ struct nitrox_sym_device *sym_dev = cdev->data->dev_private;
+ struct nitrox_device *ndev = sym_dev->ndev;
+
+ if (!info)
+ return;
+
+ info->max_nb_queue_pairs = ndev->nr_queues;
+ info->feature_flags = cdev->feature_flags;
+ info->capabilities = nitrox_get_sym_capabilities();
+ info->driver_id = nitrox_sym_drv_id;
+ info->sym.max_nb_sessions = 0;
+}
+
+static void
+nitrox_sym_dev_stats_get(struct rte_cryptodev *cdev,
+ struct rte_cryptodev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
+ struct nitrox_qp *qp = cdev->data->queue_pairs[qp_id];
+
+ if (!qp)
+ continue;
+
+ stats->enqueued_count += qp->stats.enqueued_count;
+ stats->dequeued_count += qp->stats.dequeued_count;
+ stats->enqueue_err_count += qp->stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->stats.dequeue_err_count;
+ }
+}
+
+static void
+nitrox_sym_dev_stats_reset(struct rte_cryptodev *cdev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
+ struct nitrox_qp *qp = cdev->data->queue_pairs[qp_id];
+
+ if (!qp)
+ continue;
+
+ memset(&qp->stats, 0, sizeof(qp->stats));
+ }
+}
+
+static int
+nitrox_sym_dev_qp_setup(struct rte_cryptodev *cdev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id)
+{
+ struct nitrox_sym_device *sym_dev = cdev->data->dev_private;
+ struct nitrox_device *ndev = sym_dev->ndev;
+ struct nitrox_qp *qp = NULL;
+ int err;
+
+ NITROX_LOG(DEBUG, "queue %d\n", qp_id);
+ if (qp_id >= ndev->nr_queues) {
+ NITROX_LOG(ERR, "queue %u invalid, max queues supported %d\n",
+ qp_id, ndev->nr_queues);
+ return -EINVAL;
+ }
+
+ if (cdev->data->queue_pairs[qp_id]) {
+ err = nitrox_sym_dev_qp_release(cdev, qp_id);
+ if (err)
+ return err;
+ }
+
+ qp = rte_zmalloc_socket("nitrox PMD qp", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!qp) {
+ NITROX_LOG(ERR, "Failed to allocate nitrox qp\n");
+ return -ENOMEM;
+ }
+
+ qp->qno = qp_id;
+ err = nitrox_qp_setup(qp, ndev->bar_addr, cdev->data->name,
+ qp_conf->nb_descriptors, NPS_PKT_IN_INSTR_SIZE,
+ socket_id);
+ if (unlikely(err))
+ goto qp_setup_err;
+
+ qp->sr_mp = nitrox_sym_req_pool_create(cdev, qp->count, qp_id,
+ socket_id);
+ if (unlikely(!qp->sr_mp))
+ goto req_pool_err;
+
+ cdev->data->queue_pairs[qp_id] = qp;
+ NITROX_LOG(DEBUG, "queue %d setup done\n", qp_id);
+ return 0;
+
+req_pool_err:
+ nitrox_qp_release(qp, ndev->bar_addr);
+qp_setup_err:
+ rte_free(qp);
+ return err;
+}
+
+static int
+nitrox_sym_dev_qp_release(struct rte_cryptodev *cdev, uint16_t qp_id)
+{
+ struct nitrox_sym_device *sym_dev = cdev->data->dev_private;
+ struct nitrox_device *ndev = sym_dev->ndev;
+ struct nitrox_qp *qp;
+ int err;
+
+ NITROX_LOG(DEBUG, "queue %d\n", qp_id);
+ if (qp_id >= ndev->nr_queues) {
+ NITROX_LOG(ERR, "queue %u invalid, max queues supported %d\n",
+ qp_id, ndev->nr_queues);
+ return -EINVAL;
+ }
+
+ qp = cdev->data->queue_pairs[qp_id];
+ if (!qp) {
+ NITROX_LOG(DEBUG, "queue %u already freed\n", qp_id);
+ return 0;
+ }
+
+ if (!nitrox_qp_is_empty(qp)) {
+ NITROX_LOG(ERR, "queue %d not empty\n", qp_id);
+ return -EAGAIN;
+ }
+
+ cdev->data->queue_pairs[qp_id] = NULL;
+ err = nitrox_qp_release(qp, ndev->bar_addr);
+ nitrox_sym_req_pool_free(qp->sr_mp);
+ rte_free(qp);
+ NITROX_LOG(DEBUG, "queue %d release done\n", qp_id);
+ return err;
+}
+
+static unsigned int
+nitrox_sym_dev_sess_get_size(__rte_unused struct rte_cryptodev *cdev)
+{
+ return sizeof(struct nitrox_crypto_ctx);
+}
+
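+/*
+ * Map the xform list to a chain type: a lone cipher xform is CIPHER_ONLY,
+ * encrypt followed by generate-auth is CIPHER_AUTH, and verify-auth
+ * followed by decrypt is AUTH_CIPHER; any other combination is reported
+ * as not supported.
+ */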
+static enum nitrox_chain
+get_crypto_chain_order(const struct rte_crypto_sym_xform *xform)
+{
+ enum nitrox_chain res = NITROX_CHAIN_NOT_SUPPORTED;
+
+ if (unlikely(xform == NULL))
+ return res;
+
+ switch (xform->type) {
+ case RTE_CRYPTO_SYM_XFORM_AUTH:
+ if (xform->next == NULL) {
+ res = NITROX_CHAIN_NOT_SUPPORTED;
+ } else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ if (xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY &&
+ xform->next->cipher.op ==
+ RTE_CRYPTO_CIPHER_OP_DECRYPT) {
+ res = NITROX_CHAIN_AUTH_CIPHER;
+ } else {
+ NITROX_LOG(ERR, "auth op %d, cipher op %d\n",
+ xform->auth.op, xform->next->cipher.op);
+ }
+ }
+ break;
+ case RTE_CRYPTO_SYM_XFORM_CIPHER:
+ if (xform->next == NULL) {
+ res = NITROX_CHAIN_CIPHER_ONLY;
+ } else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
+ xform->next->auth.op ==
+ RTE_CRYPTO_AUTH_OP_GENERATE) {
+ res = NITROX_CHAIN_CIPHER_AUTH;
+ } else {
+ NITROX_LOG(ERR, "cipher op %d, auth op %d\n",
+ xform->cipher.op, xform->next->auth.op);
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ return res;
+}
+
+static enum flexi_cipher
+get_flexi_cipher_type(enum rte_crypto_cipher_algorithm algo, bool *is_aes)
+{
+ enum flexi_cipher type;
+
+ switch (algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ type = CIPHER_AES_CBC;
+ *is_aes = true;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ type = CIPHER_3DES_CBC;
+ *is_aes = false;
+ break;
+ default:
+ type = CIPHER_INVALID;
+ NITROX_LOG(ERR, "Algorithm not supported %d\n", algo);
+ break;
+ }
+
+ return type;
+}
+
+static int
+flexi_aes_keylen(size_t keylen, bool is_aes)
+{
+ int aes_keylen;
+
+ if (!is_aes)
+ return 0;
+
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ aes_keylen = 1;
+ break;
+ case AES_KEYSIZE_192:
+ aes_keylen = 2;
+ break;
+ case AES_KEYSIZE_256:
+ aes_keylen = 3;
+ break;
+ default:
+ NITROX_LOG(ERR, "Invalid keylen %zu\n", keylen);
+ aes_keylen = -EINVAL;
+ break;
+ }
+
+ return aes_keylen;
+}
+
+static bool
+crypto_key_is_valid(struct rte_crypto_cipher_xform *xform,
+ struct flexi_crypto_context *fctx)
+{
+ if (unlikely(xform->key.length > sizeof(fctx->crypto.key))) {
+ NITROX_LOG(ERR, "Invalid crypto key length %d\n",
+ xform->key.length);
+ return false;
+ }
+
+ return true;
+}
+
+static int
+configure_cipher_ctx(struct rte_crypto_cipher_xform *xform,
+ struct nitrox_crypto_ctx *ctx)
+{
+ enum flexi_cipher type;
+ bool cipher_is_aes = false;
+ int aes_keylen;
+ struct flexi_crypto_context *fctx = &ctx->fctx;
+
+ type = get_flexi_cipher_type(xform->algo, &cipher_is_aes);
+ if (unlikely(type == CIPHER_INVALID))
+ return -ENOTSUP;
+
+ aes_keylen = flexi_aes_keylen(xform->key.length, cipher_is_aes);
+ if (unlikely(aes_keylen < 0))
+ return -EINVAL;
+
+ if (unlikely(!cipher_is_aes && !crypto_key_is_valid(xform, fctx)))
+ return -EINVAL;
+
+ if (unlikely(xform->iv.length > MAX_IV_LEN))
+ return -EINVAL;
+
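+	/*
+	 * fctx is handed to the device in big-endian form (it is what
+	 * ctxp points at in the request header); swap flags to host
+	 * order, update the bit-fields, then swap back.
+	 */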
+ fctx->flags = rte_be_to_cpu_64(fctx->flags);
+ fctx->w0.cipher_type = type;
+ fctx->w0.aes_keylen = aes_keylen;
+ fctx->w0.iv_source = IV_FROM_DPTR;
+ fctx->flags = rte_cpu_to_be_64(fctx->flags);
+ memset(fctx->crypto.key, 0, sizeof(fctx->crypto.key));
+ memcpy(fctx->crypto.key, xform->key.data, xform->key.length);
+
+ ctx->opcode = FLEXI_CRYPTO_ENCRYPT_HMAC;
+ ctx->req_op = (xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+ NITROX_OP_ENCRYPT : NITROX_OP_DECRYPT;
+ ctx->iv.offset = xform->iv.offset;
+ ctx->iv.length = xform->iv.length;
+ return 0;
+}
+
+static enum flexi_auth
+get_flexi_auth_type(enum rte_crypto_auth_algorithm algo)
+{
+ enum flexi_auth type;
+
+ switch (algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ type = AUTH_SHA1;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ type = AUTH_SHA2_SHA224;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ type = AUTH_SHA2_SHA256;
+ break;
+ default:
+ NITROX_LOG(ERR, "Algorithm not supported %d\n", algo);
+ type = AUTH_INVALID;
+ break;
+ }
+
+ return type;
+}
+
+static bool
+auth_key_digest_is_valid(struct rte_crypto_auth_xform *xform,
+ struct flexi_crypto_context *fctx)
+{
+ if (unlikely(!xform->key.data && xform->key.length)) {
+ NITROX_LOG(ERR, "Invalid auth key\n");
+ return false;
+ }
+
+ if (unlikely(xform->key.length > sizeof(fctx->auth.opad))) {
+ NITROX_LOG(ERR, "Invalid auth key length %d\n",
+ xform->key.length);
+ return false;
+ }
+
+ return true;
+}
+
+static int
+configure_auth_ctx(struct rte_crypto_auth_xform *xform,
+ struct nitrox_crypto_ctx *ctx)
+{
+ enum flexi_auth type;
+ struct flexi_crypto_context *fctx = &ctx->fctx;
+
+ type = get_flexi_auth_type(xform->algo);
+ if (unlikely(type == AUTH_INVALID))
+ return -ENOTSUP;
+
+ if (unlikely(!auth_key_digest_is_valid(xform, fctx)))
+ return -EINVAL;
+
+ ctx->auth_op = xform->op;
+ ctx->auth_algo = xform->algo;
+ ctx->digest_length = xform->digest_length;
+
+ fctx->flags = rte_be_to_cpu_64(fctx->flags);
+ fctx->w0.hash_type = type;
+ fctx->w0.auth_input_type = 1;
+ fctx->w0.mac_len = xform->digest_length;
+ fctx->flags = rte_cpu_to_be_64(fctx->flags);
+ memset(&fctx->auth, 0, sizeof(fctx->auth));
+ memcpy(fctx->auth.opad, xform->key.data, xform->key.length);
+ return 0;
+}
+
+static int
+nitrox_sym_dev_sess_configure(struct rte_cryptodev *cdev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *mp_obj;
+ struct nitrox_crypto_ctx *ctx;
+ struct rte_crypto_cipher_xform *cipher_xform = NULL;
+ struct rte_crypto_auth_xform *auth_xform = NULL;
+
+ if (rte_mempool_get(mempool, &mp_obj)) {
+ NITROX_LOG(ERR, "Couldn't allocate context\n");
+ return -ENOMEM;
+ }
+
+ ctx = mp_obj;
+ ctx->nitrox_chain = get_crypto_chain_order(xform);
+ switch (ctx->nitrox_chain) {
+ case NITROX_CHAIN_CIPHER_AUTH:
+ cipher_xform = &xform->cipher;
+ auth_xform = &xform->next->auth;
+ break;
+ case NITROX_CHAIN_AUTH_CIPHER:
+ auth_xform = &xform->auth;
+ cipher_xform = &xform->next->cipher;
+ break;
+ default:
+ NITROX_LOG(ERR, "Crypto chain not supported\n");
+ goto err;
+ }
+
+ if (cipher_xform && unlikely(configure_cipher_ctx(cipher_xform, ctx))) {
+ NITROX_LOG(ERR, "Failed to configure cipher ctx\n");
+ goto err;
+ }
+
+ if (auth_xform && unlikely(configure_auth_ctx(auth_xform, ctx))) {
+ NITROX_LOG(ERR, "Failed to configure auth ctx\n");
+ goto err;
+ }
+
+ ctx->iova = rte_mempool_virt2iova(ctx);
+ set_sym_session_private_data(sess, cdev->driver_id, ctx);
+ return 0;
+err:
+ rte_mempool_put(mempool, mp_obj);
+ return -EINVAL;
+}
+
+static void
+nitrox_sym_dev_sess_clear(struct rte_cryptodev *cdev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ struct nitrox_crypto_ctx *ctx = get_sym_session_private_data(sess,
+ cdev->driver_id);
+ struct rte_mempool *sess_mp;
+
+ if (!ctx)
+ return;
+
+ memset(ctx, 0, sizeof(*ctx));
+ sess_mp = rte_mempool_from_obj(ctx);
+ set_sym_session_private_data(sess, cdev->driver_id, NULL);
+ rte_mempool_put(sess_mp, ctx);
+}
+
+static struct nitrox_crypto_ctx *
+get_crypto_ctx(struct rte_crypto_op *op)
+{
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (likely(op->sym->session))
+ return get_sym_session_private_data(op->sym->session,
+ nitrox_sym_drv_id);
+ }
+
+ return NULL;
+}
+
+static int
+nitrox_enq_single_op(struct nitrox_qp *qp, struct rte_crypto_op *op)
+{
+ struct nitrox_crypto_ctx *ctx;
+ struct nitrox_softreq *sr;
+ int err;
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ ctx = get_crypto_ctx(op);
+ if (unlikely(!ctx)) {
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ return -EINVAL;
+ }
+
+ if (unlikely(rte_mempool_get(qp->sr_mp, (void **)&sr)))
+ return -ENOMEM;
+
+ err = nitrox_process_se_req(qp->qno, op, ctx, sr);
+ if (unlikely(err)) {
+ rte_mempool_put(qp->sr_mp, sr);
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return err;
+ }
+
+ nitrox_qp_enqueue(qp, nitrox_sym_instr_addr(sr), sr);
+ return 0;
+}
+
+static uint16_t
+nitrox_sym_dev_enq_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct nitrox_qp *qp = queue_pair;
+ uint16_t free_slots = 0;
+ uint16_t cnt = 0;
+ bool err = false;
+
+ free_slots = nitrox_qp_free_count(qp);
+ if (nb_ops > free_slots)
+ nb_ops = free_slots;
+
+ for (cnt = 0; cnt < nb_ops; cnt++) {
+ if (unlikely(nitrox_enq_single_op(qp, ops[cnt]))) {
+ err = true;
+ break;
+ }
+ }
+
+ nitrox_ring_dbell(qp, cnt);
+ qp->stats.enqueued_count += cnt;
+ if (unlikely(err))
+ qp->stats.enqueue_err_count++;
+
+ return cnt;
+}
+
+static int
+nitrox_deq_single_op(struct nitrox_qp *qp, struct rte_crypto_op **op_ptr)
+{
+ struct nitrox_softreq *sr;
+ int ret;
+ struct rte_crypto_op *op;
+
+ sr = nitrox_qp_get_softreq(qp);
+ ret = nitrox_check_se_req(sr, op_ptr);
+ if (ret < 0)
+ return -EAGAIN;
+
+ op = *op_ptr;
+ nitrox_qp_dequeue(qp);
+ rte_mempool_put(qp->sr_mp, sr);
+ if (!ret) {
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ qp->stats.dequeued_count++;
+
+ return 0;
+ }
+
+ if (ret == MC_MAC_MISMATCH_ERR_CODE)
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ else
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+
+ qp->stats.dequeue_err_count++;
+ return 0;
+}
+
+static uint16_t
+nitrox_sym_dev_deq_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct nitrox_qp *qp = queue_pair;
+ uint16_t filled_slots = nitrox_qp_used_count(qp);
+ int cnt = 0;
+
+ if (nb_ops > filled_slots)
+ nb_ops = filled_slots;
+
+ for (cnt = 0; cnt < nb_ops; cnt++)
+ if (nitrox_deq_single_op(qp, &ops[cnt]))
+ break;
+
+ return cnt;
+}
+
+static struct rte_cryptodev_ops nitrox_cryptodev_ops = {
+ .dev_configure = nitrox_sym_dev_config,
+ .dev_start = nitrox_sym_dev_start,
+ .dev_stop = nitrox_sym_dev_stop,
+ .dev_close = nitrox_sym_dev_close,
+ .dev_infos_get = nitrox_sym_dev_info_get,
+ .stats_get = nitrox_sym_dev_stats_get,
+ .stats_reset = nitrox_sym_dev_stats_reset,
+ .queue_pair_setup = nitrox_sym_dev_qp_setup,
+ .queue_pair_release = nitrox_sym_dev_qp_release,
+ .sym_session_get_size = nitrox_sym_dev_sess_get_size,
+ .sym_session_configure = nitrox_sym_dev_sess_configure,
+ .sym_session_clear = nitrox_sym_dev_sess_clear
+};
+
+int
+nitrox_sym_pmd_create(struct nitrox_device *ndev)
+{
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ struct rte_cryptodev_pmd_init_params init_params = {
+ .name = "",
+ .socket_id = ndev->pdev->device.numa_node,
+ .private_data_size = sizeof(struct nitrox_sym_device)
+ };
+ struct rte_cryptodev *cdev;
+
+ rte_pci_device_name(&ndev->pdev->addr, name, sizeof(name));
+ snprintf(name + strlen(name), RTE_CRYPTODEV_NAME_MAX_LEN - strlen(name),
+ "_n5sym");
+ ndev->rte_sym_dev.driver = &nitrox_rte_sym_drv;
+ ndev->rte_sym_dev.numa_node = ndev->pdev->device.numa_node;
+ ndev->rte_sym_dev.devargs = NULL;
+ cdev = rte_cryptodev_pmd_create(name, &ndev->rte_sym_dev,
+ &init_params);
+ if (!cdev) {
+ NITROX_LOG(ERR, "Cryptodev '%s' creation failed\n", name);
+ return -ENODEV;
+ }
+
+ ndev->rte_sym_dev.name = cdev->data->name;
+ cdev->driver_id = nitrox_sym_drv_id;
+ cdev->dev_ops = &nitrox_cryptodev_ops;
+ cdev->enqueue_burst = nitrox_sym_dev_enq_burst;
+ cdev->dequeue_burst = nitrox_sym_dev_deq_burst;
+ cdev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_HW_ACCELERATED |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_IN_PLACE_SGL |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
+
+ ndev->sym_dev = cdev->data->dev_private;
+ ndev->sym_dev->cdev = cdev;
+ ndev->sym_dev->ndev = ndev;
+ NITROX_LOG(DEBUG, "Created cryptodev '%s', dev_id %d, drv_id %d\n",
+ cdev->data->name, cdev->data->dev_id, nitrox_sym_drv_id);
+ return 0;
+}
+
+int
+nitrox_sym_pmd_destroy(struct nitrox_device *ndev)
+{
+ return rte_cryptodev_pmd_destroy(ndev->sym_dev->cdev);
+}
+
+static struct cryptodev_driver nitrox_crypto_drv;
+RTE_PMD_REGISTER_CRYPTO_DRIVER(nitrox_crypto_drv,
+ nitrox_rte_sym_drv,
+ nitrox_sym_drv_id);
diff --git a/src/spdk/dpdk/drivers/crypto/nitrox/nitrox_sym.h b/src/spdk/dpdk/drivers/crypto/nitrox/nitrox_sym.h
new file mode 100644
index 000000000..f30847e8a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/nitrox/nitrox_sym.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef _NITROX_SYM_H_
+#define _NITROX_SYM_H_
+
+struct nitrox_device;
+
+int nitrox_sym_pmd_create(struct nitrox_device *ndev);
+int nitrox_sym_pmd_destroy(struct nitrox_device *ndev);
+
+#endif /* _NITROX_SYM_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/nitrox/nitrox_sym_capabilities.c b/src/spdk/dpdk/drivers/crypto/nitrox/nitrox_sym_capabilities.c
new file mode 100644
index 000000000..dc4df9185
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/nitrox/nitrox_sym_capabilities.c
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include "nitrox_sym_capabilities.h"
+
+static const struct rte_cryptodev_capabilities nitrox_capabilities[] = {
+ { /* SHA1 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 20,
+ .increment = 1
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA224 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 28,
+ .increment = 1
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA256 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 32,
+ .increment = 1
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* AES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* 3DES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+ .block_size = 8,
+ .key_size = {
+ .min = 24,
+ .max = 24,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+const struct rte_cryptodev_capabilities *
+nitrox_get_sym_capabilities(void)
+{
+ return nitrox_capabilities;
+}
diff --git a/src/spdk/dpdk/drivers/crypto/nitrox/nitrox_sym_capabilities.h b/src/spdk/dpdk/drivers/crypto/nitrox/nitrox_sym_capabilities.h
new file mode 100644
index 000000000..cb2d97572
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/nitrox/nitrox_sym_capabilities.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef _NITROX_SYM_CAPABILITIES_H_
+#define _NITROX_SYM_CAPABILITIES_H_
+
+#include <rte_cryptodev.h>
+
+const struct rte_cryptodev_capabilities *nitrox_get_sym_capabilities(void);
+
+#endif /* _NITROX_SYM_CAPABILITIES_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/nitrox/nitrox_sym_ctx.h b/src/spdk/dpdk/drivers/crypto/nitrox/nitrox_sym_ctx.h
new file mode 100644
index 000000000..2985e519f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/nitrox/nitrox_sym_ctx.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef _NITROX_SYM_CTX_H_
+#define _NITROX_SYM_CTX_H_
+
+#include <stdbool.h>
+
+#include <rte_crypto.h>
+
+#define AES_MAX_KEY_SIZE 32
+#define AES_BLOCK_SIZE 16
+
+enum nitrox_chain {
+ NITROX_CHAIN_CIPHER_ONLY,
+ NITROX_CHAIN_CIPHER_AUTH,
+ NITROX_CHAIN_AUTH_CIPHER,
+ NITROX_CHAIN_COMBINED,
+ NITROX_CHAIN_NOT_SUPPORTED
+};
+
+enum nitrox_op {
+ NITROX_OP_ENCRYPT,
+ NITROX_OP_DECRYPT,
+};
+
+struct crypto_keys {
+ uint8_t key[AES_MAX_KEY_SIZE];
+ uint8_t iv[AES_BLOCK_SIZE];
+};
+
+struct auth_keys {
+ uint8_t ipad[64];
+ uint8_t opad[64];
+};
+
+struct flexi_crypto_context {
+ union {
+ uint64_t flags;
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint64_t cipher_type : 4;
+ uint64_t reserved_59 : 1;
+ uint64_t aes_keylen : 2;
+ uint64_t iv_source : 1;
+ uint64_t hash_type : 4;
+ uint64_t reserved_49_51 : 3;
+ uint64_t auth_input_type : 1;
+ uint64_t mac_len : 8;
+ uint64_t reserved_0_39 : 40;
+#else
+ uint64_t reserved_0_39 : 40;
+ uint64_t mac_len : 8;
+ uint64_t auth_input_type : 1;
+ uint64_t reserved_49_51 : 3;
+ uint64_t hash_type : 4;
+ uint64_t iv_source : 1;
+ uint64_t aes_keylen : 2;
+ uint64_t reserved_59 : 1;
+ uint64_t cipher_type : 4;
+#endif
+ } w0;
+ };
+ struct crypto_keys crypto;
+ struct auth_keys auth;
+};
+
+struct nitrox_crypto_ctx {
+ struct flexi_crypto_context fctx;
+ enum nitrox_chain nitrox_chain;
+ enum rte_crypto_auth_operation auth_op;
+ enum rte_crypto_auth_algorithm auth_algo;
+ struct {
+ uint16_t offset;
+ uint16_t length;
+ } iv;
+ rte_iova_t iova;
+ uint16_t digest_length;
+ uint8_t opcode;
+ uint8_t req_op;
+};
+
+#endif /* _NITROX_SYM_CTX_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/nitrox/nitrox_sym_reqmgr.c b/src/spdk/dpdk/drivers/crypto/nitrox/nitrox_sym_reqmgr.c
new file mode 100644
index 000000000..d9b426776
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/nitrox/nitrox_sym_reqmgr.c
@@ -0,0 +1,635 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <rte_crypto.h>
+#include <rte_cryptodev.h>
+#include <rte_cycles.h>
+#include <rte_errno.h>
+
+#include "nitrox_sym_reqmgr.h"
+#include "nitrox_logs.h"
+
+#define MAX_SGBUF_CNT 16
+#define MAX_SGCOMP_CNT 5
+/* SLC_STORE_INFO */
+#define MIN_UDD_LEN 16
+/* PKT_IN_HDR + SLC_STORE_INFO */
+#define FDATA_SIZE 32
+/* Base destination port for the solicited requests */
+#define SOLICIT_BASE_DPORT 256
+#define PENDING_SIG 0xFFFFFFFFFFFFFFFFUL
+#define CMD_TIMEOUT 2
+
+struct gphdr {
+ uint16_t param0;
+ uint16_t param1;
+ uint16_t param2;
+ uint16_t param3;
+};
+
+union pkt_instr_hdr {
+ uint64_t value;
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint64_t raz_48_63 : 16;
+ uint64_t g : 1;
+ uint64_t gsz : 7;
+ uint64_t ihi : 1;
+ uint64_t ssz : 7;
+ uint64_t raz_30_31 : 2;
+ uint64_t fsz : 6;
+ uint64_t raz_16_23 : 8;
+ uint64_t tlen : 16;
+#else
+ uint64_t tlen : 16;
+ uint64_t raz_16_23 : 8;
+ uint64_t fsz : 6;
+ uint64_t raz_30_31 : 2;
+ uint64_t ssz : 7;
+ uint64_t ihi : 1;
+ uint64_t gsz : 7;
+ uint64_t g : 1;
+ uint64_t raz_48_63 : 16;
+#endif
+ } s;
+};
+
+union pkt_hdr {
+ uint64_t value[2];
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint64_t opcode : 8;
+ uint64_t arg : 8;
+ uint64_t ctxc : 2;
+ uint64_t unca : 1;
+ uint64_t raz_44 : 1;
+ uint64_t info : 3;
+ uint64_t destport : 9;
+ uint64_t unc : 8;
+ uint64_t raz_19_23 : 5;
+ uint64_t grp : 3;
+ uint64_t raz_15 : 1;
+ uint64_t ctxl : 7;
+ uint64_t uddl : 8;
+#else
+ uint64_t uddl : 8;
+ uint64_t ctxl : 7;
+ uint64_t raz_15 : 1;
+ uint64_t grp : 3;
+ uint64_t raz_19_23 : 5;
+ uint64_t unc : 8;
+ uint64_t destport : 9;
+ uint64_t info : 3;
+ uint64_t raz_44 : 1;
+ uint64_t unca : 1;
+ uint64_t ctxc : 2;
+ uint64_t arg : 8;
+ uint64_t opcode : 8;
+#endif
+ uint64_t ctxp;
+ } s;
+};
+
+union slc_store_info {
+ uint64_t value[2];
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint64_t raz_39_63 : 25;
+ uint64_t ssz : 7;
+ uint64_t raz_0_31 : 32;
+#else
+ uint64_t raz_0_31 : 32;
+ uint64_t ssz : 7;
+ uint64_t raz_39_63 : 25;
+#endif
+ uint64_t rptr;
+ } s;
+};
+
+struct nps_pkt_instr {
+ uint64_t dptr0;
+ union pkt_instr_hdr ih;
+ union pkt_hdr irh;
+ union slc_store_info slc;
+ uint64_t fdata[2];
+};
+
+struct resp_hdr {
+ uint64_t orh;
+ uint64_t completion;
+};
+
+struct nitrox_sglist {
+ uint16_t len;
+ uint16_t raz0;
+ uint32_t raz1;
+ rte_iova_t iova;
+ void *virt;
+};
+
+struct nitrox_sgcomp {
+ uint16_t len[4];
+ uint64_t iova[4];
+};
+
+struct nitrox_sgtable {
+ uint8_t map_bufs_cnt;
+ uint8_t nr_sgcomp;
+ uint16_t total_bytes;
+
+ struct nitrox_sglist sglist[MAX_SGBUF_CNT];
+ struct nitrox_sgcomp sgcomp[MAX_SGCOMP_CNT];
+};
+
+struct iv {
+ uint8_t *virt;
+ rte_iova_t iova;
+ uint16_t len;
+};
+
+struct nitrox_softreq {
+ struct nitrox_crypto_ctx *ctx;
+ struct rte_crypto_op *op;
+ struct gphdr gph;
+ struct nps_pkt_instr instr;
+ struct resp_hdr resp;
+ struct nitrox_sgtable in;
+ struct nitrox_sgtable out;
+ struct iv iv;
+ uint64_t timeout;
+ rte_iova_t dptr;
+ rte_iova_t rptr;
+ rte_iova_t iova;
+};
+
+static void
+softreq_init(struct nitrox_softreq *sr, rte_iova_t iova)
+{
+ memset(sr, 0, sizeof(*sr));
+ sr->iova = iova;
+}
+
+/*
+ * 64-Byte Instruction Format
+ *
+ * ----------------------
+ * | DPTR0 | 8 bytes
+ * ----------------------
+ * | PKT_IN_INSTR_HDR | 8 bytes
+ * ----------------------
+ * | PKT_IN_HDR | 16 bytes
+ * ----------------------
+ * | SLC_INFO | 16 bytes
+ * ----------------------
+ * | Front data | 16 bytes
+ * ----------------------
+ */
+static void
+create_se_instr(struct nitrox_softreq *sr, uint8_t qno)
+{
+ struct nitrox_crypto_ctx *ctx = sr->ctx;
+ rte_iova_t ctx_handle;
+
+ /* fill the packet instruction */
+ /* word 0 */
+ sr->instr.dptr0 = rte_cpu_to_be_64(sr->dptr);
+
+ /* word 1 */
+ sr->instr.ih.value = 0;
+ sr->instr.ih.s.g = 1;
+ sr->instr.ih.s.gsz = sr->in.map_bufs_cnt;
+ sr->instr.ih.s.ssz = sr->out.map_bufs_cnt;
+ sr->instr.ih.s.fsz = FDATA_SIZE + sizeof(struct gphdr);
+ sr->instr.ih.s.tlen = sr->instr.ih.s.fsz + sr->in.total_bytes;
+ sr->instr.ih.value = rte_cpu_to_be_64(sr->instr.ih.value);
+
+ /* word 2 */
+ sr->instr.irh.value[0] = 0;
+ sr->instr.irh.s.uddl = MIN_UDD_LEN;
+ /* context length in 64-bit words */
+ sr->instr.irh.s.ctxl = RTE_ALIGN_MUL_CEIL(sizeof(ctx->fctx), 8) / 8;
+ /* offset from solicit base port 256 */
+ sr->instr.irh.s.destport = SOLICIT_BASE_DPORT + qno;
+ /* Invalid context cache */
+ sr->instr.irh.s.ctxc = 0x3;
+ sr->instr.irh.s.arg = ctx->req_op;
+ sr->instr.irh.s.opcode = ctx->opcode;
+ sr->instr.irh.value[0] = rte_cpu_to_be_64(sr->instr.irh.value[0]);
+
+ /* word 3 */
+ ctx_handle = ctx->iova + offsetof(struct nitrox_crypto_ctx, fctx);
+ sr->instr.irh.s.ctxp = rte_cpu_to_be_64(ctx_handle);
+
+ /* word 4 */
+ sr->instr.slc.value[0] = 0;
+ sr->instr.slc.s.ssz = sr->out.map_bufs_cnt;
+ sr->instr.slc.value[0] = rte_cpu_to_be_64(sr->instr.slc.value[0]);
+
+ /* word 5 */
+ sr->instr.slc.s.rptr = rte_cpu_to_be_64(sr->rptr);
+	/*
+	 * No endian conversion for front data; it goes into the payload
+	 * as-is. Put the GP header in the front data.
+	 */
+ memcpy(&sr->instr.fdata[0], &sr->gph, sizeof(sr->instr.fdata[0]));
+ sr->instr.fdata[1] = 0;
+}
+
+static void
+softreq_copy_iv(struct nitrox_softreq *sr)
+{
+ sr->iv.virt = rte_crypto_op_ctod_offset(sr->op, uint8_t *,
+ sr->ctx->iv.offset);
+ sr->iv.iova = rte_crypto_op_ctophys_offset(sr->op, sr->ctx->iv.offset);
+ sr->iv.len = sr->ctx->iv.length;
+}
+
+static int
+extract_cipher_auth_digest(struct nitrox_softreq *sr,
+ struct nitrox_sglist *digest)
+{
+ struct rte_crypto_op *op = sr->op;
+ struct rte_mbuf *mdst = op->sym->m_dst ? op->sym->m_dst :
+ op->sym->m_src;
+
+ if (sr->ctx->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY &&
+ unlikely(!op->sym->auth.digest.data))
+ return -EINVAL;
+
+ digest->len = sr->ctx->digest_length;
+ if (op->sym->auth.digest.data) {
+ digest->iova = op->sym->auth.digest.phys_addr;
+ digest->virt = op->sym->auth.digest.data;
+ return 0;
+ }
+
+ if (unlikely(rte_pktmbuf_data_len(mdst) < op->sym->auth.data.offset +
+ op->sym->auth.data.length + digest->len))
+ return -EINVAL;
+
+ digest->iova = rte_pktmbuf_mtophys_offset(mdst,
+ op->sym->auth.data.offset +
+ op->sym->auth.data.length);
+ digest->virt = rte_pktmbuf_mtod_offset(mdst, uint8_t *,
+ op->sym->auth.data.offset +
+ op->sym->auth.data.length);
+ return 0;
+}
+
+static void
+fill_sglist(struct nitrox_sgtable *sgtbl, uint16_t len, rte_iova_t iova,
+ void *virt)
+{
+ struct nitrox_sglist *sglist = sgtbl->sglist;
+ uint8_t cnt = sgtbl->map_bufs_cnt;
+
+ if (unlikely(!len))
+ return;
+
+ sglist[cnt].len = len;
+ sglist[cnt].iova = iova;
+ sglist[cnt].virt = virt;
+ sgtbl->total_bytes += len;
+ cnt++;
+ sgtbl->map_bufs_cnt = cnt;
+}
+
+static int
+create_sglist_from_mbuf(struct nitrox_sgtable *sgtbl, struct rte_mbuf *mbuf,
+ uint32_t off, int datalen)
+{
+ struct nitrox_sglist *sglist = sgtbl->sglist;
+ uint8_t cnt = sgtbl->map_bufs_cnt;
+ struct rte_mbuf *m;
+ int mlen;
+
+ if (unlikely(datalen <= 0))
+ return 0;
+
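+	/* walk past whole segments until 'off' falls inside segment m */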
+ for (m = mbuf; m && off > rte_pktmbuf_data_len(m); m = m->next)
+ off -= rte_pktmbuf_data_len(m);
+
+ if (unlikely(!m))
+ return -EIO;
+
+ mlen = rte_pktmbuf_data_len(m) - off;
+ if (datalen <= mlen)
+ mlen = datalen;
+ sglist[cnt].len = mlen;
+ sglist[cnt].iova = rte_pktmbuf_mtophys_offset(m, off);
+ sglist[cnt].virt = rte_pktmbuf_mtod_offset(m, uint8_t *, off);
+ sgtbl->total_bytes += mlen;
+ cnt++;
+ datalen -= mlen;
+ for (m = m->next; m && datalen; m = m->next) {
+ mlen = rte_pktmbuf_data_len(m) < datalen ?
+ rte_pktmbuf_data_len(m) : datalen;
+ sglist[cnt].len = mlen;
+ sglist[cnt].iova = rte_pktmbuf_mtophys(m);
+ sglist[cnt].virt = rte_pktmbuf_mtod(m, uint8_t *);
+ sgtbl->total_bytes += mlen;
+ cnt++;
+ datalen -= mlen;
+ }
+
+ RTE_VERIFY(cnt <= MAX_SGBUF_CNT);
+ sgtbl->map_bufs_cnt = cnt;
+ return 0;
+}
+
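+/*
+ * Common SG layout for cipher+auth chains: the IV, then the region
+ * covered only by authentication, then the cipher region.
+ */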
+static int
+create_cipher_auth_sglist(struct nitrox_softreq *sr,
+ struct nitrox_sgtable *sgtbl, struct rte_mbuf *mbuf)
+{
+ struct rte_crypto_op *op = sr->op;
+ int auth_only_len;
+ int err;
+
+ fill_sglist(sgtbl, sr->iv.len, sr->iv.iova, sr->iv.virt);
+ auth_only_len = op->sym->auth.data.length - op->sym->cipher.data.length;
+ if (unlikely(auth_only_len < 0))
+ return -EINVAL;
+
+ err = create_sglist_from_mbuf(sgtbl, mbuf, op->sym->auth.data.offset,
+ auth_only_len);
+ if (unlikely(err))
+ return err;
+
+ err = create_sglist_from_mbuf(sgtbl, mbuf, op->sym->cipher.data.offset,
+ op->sym->cipher.data.length);
+ if (unlikely(err))
+ return err;
+
+ return 0;
+}
+
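+/*
+ * Pack the SG list into hardware SG components, four big-endian
+ * (length, address) pairs per component.
+ */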
+static void
+create_sgcomp(struct nitrox_sgtable *sgtbl)
+{
+ int i, j, nr_sgcomp;
+ struct nitrox_sgcomp *sgcomp = sgtbl->sgcomp;
+ struct nitrox_sglist *sglist = sgtbl->sglist;
+
+ nr_sgcomp = RTE_ALIGN_MUL_CEIL(sgtbl->map_bufs_cnt, 4) / 4;
+ sgtbl->nr_sgcomp = nr_sgcomp;
+ for (i = 0; i < nr_sgcomp; i++, sgcomp++) {
+ for (j = 0; j < 4; j++, sglist++) {
+ sgcomp->len[j] = rte_cpu_to_be_16(sglist->len);
+ sgcomp->iova[j] = rte_cpu_to_be_64(sglist->iova);
+ }
+ }
+}
+
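+/*
+ * Build the gather (input) list: IV, auth-only region and cipher region,
+ * plus the digest when verifying. DPTR points at the sgcomp array
+ * embedded in the softreq.
+ */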
+static int
+create_cipher_auth_inbuf(struct nitrox_softreq *sr,
+ struct nitrox_sglist *digest)
+{
+ int err;
+ struct nitrox_crypto_ctx *ctx = sr->ctx;
+
+ err = create_cipher_auth_sglist(sr, &sr->in, sr->op->sym->m_src);
+ if (unlikely(err))
+ return err;
+
+ if (ctx->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY)
+ fill_sglist(&sr->in, digest->len, digest->iova, digest->virt);
+
+ create_sgcomp(&sr->in);
+ sr->dptr = sr->iova + offsetof(struct nitrox_softreq, in.sgcomp);
+ return 0;
+}
+
+static int
+create_cipher_auth_oop_outbuf(struct nitrox_softreq *sr,
+ struct nitrox_sglist *digest)
+{
+ int err;
+ struct nitrox_crypto_ctx *ctx = sr->ctx;
+
+ err = create_cipher_auth_sglist(sr, &sr->out, sr->op->sym->m_dst);
+ if (unlikely(err))
+ return err;
+
+ if (ctx->auth_op == RTE_CRYPTO_AUTH_OP_GENERATE)
+ fill_sglist(&sr->out, digest->len, digest->iova, digest->virt);
+
+ return 0;
+}
+
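+/*
+ * In-place output: mirror the input SG entries into the scatter list,
+ * then append the digest for generate, or drop the mirrored digest entry
+ * for verify.
+ */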
+static void
+create_cipher_auth_inplace_outbuf(struct nitrox_softreq *sr,
+ struct nitrox_sglist *digest)
+{
+ int i, cnt;
+ struct nitrox_crypto_ctx *ctx = sr->ctx;
+
+ cnt = sr->out.map_bufs_cnt;
+ for (i = 0; i < sr->in.map_bufs_cnt; i++, cnt++) {
+ sr->out.sglist[cnt].len = sr->in.sglist[i].len;
+ sr->out.sglist[cnt].iova = sr->in.sglist[i].iova;
+ sr->out.sglist[cnt].virt = sr->in.sglist[i].virt;
+ }
+
+ sr->out.map_bufs_cnt = cnt;
+ if (ctx->auth_op == RTE_CRYPTO_AUTH_OP_GENERATE) {
+ fill_sglist(&sr->out, digest->len, digest->iova,
+ digest->virt);
+ } else if (ctx->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
+ sr->out.map_bufs_cnt--;
+ }
+}
+
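+/*
+ * Build the scatter (output) list: the ORH comes first and the
+ * completion word last, with the data (and generated digest, if any) in
+ * between. Both words are preset to PENDING_SIG so completion can be
+ * detected by polling. RPTR points at the output sgcomp array.
+ */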
+static int
+create_cipher_auth_outbuf(struct nitrox_softreq *sr,
+ struct nitrox_sglist *digest)
+{
+ struct rte_crypto_op *op = sr->op;
+ int cnt = 0;
+
+ sr->resp.orh = PENDING_SIG;
+ sr->out.sglist[cnt].len = sizeof(sr->resp.orh);
+ sr->out.sglist[cnt].iova = sr->iova + offsetof(struct nitrox_softreq,
+ resp.orh);
+ sr->out.sglist[cnt].virt = &sr->resp.orh;
+ cnt++;
+ sr->out.map_bufs_cnt = cnt;
+ if (op->sym->m_dst) {
+ int err;
+
+ err = create_cipher_auth_oop_outbuf(sr, digest);
+ if (unlikely(err))
+ return err;
+ } else {
+ create_cipher_auth_inplace_outbuf(sr, digest);
+ }
+
+ cnt = sr->out.map_bufs_cnt;
+ sr->resp.completion = PENDING_SIG;
+ sr->out.sglist[cnt].len = sizeof(sr->resp.completion);
+ sr->out.sglist[cnt].iova = sr->iova + offsetof(struct nitrox_softreq,
+ resp.completion);
+ sr->out.sglist[cnt].virt = &sr->resp.completion;
+ cnt++;
+ RTE_VERIFY(cnt <= MAX_SGBUF_CNT);
+ sr->out.map_bufs_cnt = cnt;
+
+ create_sgcomp(&sr->out);
+ sr->rptr = sr->iova + offsetof(struct nitrox_softreq, out.sgcomp);
+ return 0;
+}
+
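+/*
+ * Fill the general purpose header: param0 is the cipher length, param1
+ * the auth length, param2 the IV length plus the auth-only length, and
+ * param3 packs the IV and auth offsets.
+ */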
+static void
+create_aead_gph(uint32_t cryptlen, uint16_t ivlen, uint32_t authlen,
+ struct gphdr *gph)
+{
+ int auth_only_len;
+ union {
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint16_t iv_offset : 8;
+ uint16_t auth_offset : 8;
+#else
+ uint16_t auth_offset : 8;
+ uint16_t iv_offset : 8;
+#endif
+ };
+ uint16_t value;
+ } param3;
+
+ gph->param0 = rte_cpu_to_be_16(cryptlen);
+ gph->param1 = rte_cpu_to_be_16(authlen);
+
+ auth_only_len = authlen - cryptlen;
+ gph->param2 = rte_cpu_to_be_16(ivlen + auth_only_len);
+
+ param3.iv_offset = 0;
+ param3.auth_offset = ivlen;
+ gph->param3 = rte_cpu_to_be_16(param3.value);
+}
+
+static int
+process_cipher_auth_data(struct nitrox_softreq *sr)
+{
+ struct rte_crypto_op *op = sr->op;
+ int err;
+ struct nitrox_sglist digest;
+
+ softreq_copy_iv(sr);
+ err = extract_cipher_auth_digest(sr, &digest);
+ if (unlikely(err))
+ return err;
+
+ err = create_cipher_auth_inbuf(sr, &digest);
+ if (unlikely(err))
+ return err;
+
+ err = create_cipher_auth_outbuf(sr, &digest);
+ if (unlikely(err))
+ return err;
+
+ create_aead_gph(op->sym->cipher.data.length, sr->iv.len,
+ op->sym->auth.data.length, &sr->gph);
+ return 0;
+}
+
+static int
+process_softreq(struct nitrox_softreq *sr)
+{
+ struct nitrox_crypto_ctx *ctx = sr->ctx;
+ int err = 0;
+
+ switch (ctx->nitrox_chain) {
+ case NITROX_CHAIN_CIPHER_AUTH:
+ case NITROX_CHAIN_AUTH_CIPHER:
+ err = process_cipher_auth_data(sr);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+int
+nitrox_process_se_req(uint16_t qno, struct rte_crypto_op *op,
+		      struct nitrox_crypto_ctx *ctx,
+		      struct nitrox_softreq *sr)
+{
+	int err;
+
+	softreq_init(sr, sr->iova);
+	sr->ctx = ctx;
+	sr->op = op;
+	err = process_softreq(sr);
+	if (unlikely(err))
+		return err;
+
+	create_se_instr(sr, qno);
+	sr->timeout = rte_get_timer_cycles() + CMD_TIMEOUT * rte_get_timer_hz();
+	return 0;
+}
+
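+/*
+ * Poll the completion word and ORH written back by the hardware: a
+ * rewritten completion word means success, a rewritten ORH with a
+ * non-zero low byte carries the error code, and -EAGAIN is returned
+ * while both still hold PENDING_SIG and the timeout has not expired.
+ */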
+int
+nitrox_check_se_req(struct nitrox_softreq *sr, struct rte_crypto_op **op)
+{
+ uint64_t cc;
+ uint64_t orh;
+ int err;
+
+ cc = *(volatile uint64_t *)(&sr->resp.completion);
+ orh = *(volatile uint64_t *)(&sr->resp.orh);
+ if (cc != PENDING_SIG)
+ err = 0;
+ else if ((orh != PENDING_SIG) && (orh & 0xff))
+ err = orh & 0xff;
+ else if (rte_get_timer_cycles() >= sr->timeout)
+ err = 0xff;
+ else
+ return -EAGAIN;
+
+ if (unlikely(err))
+ NITROX_LOG(ERR, "Request err 0x%x, orh 0x%"PRIx64"\n", err,
+ sr->resp.orh);
+
+ *op = sr->op;
+ return err;
+}
+
+void *
+nitrox_sym_instr_addr(struct nitrox_softreq *sr)
+{
+ return &sr->instr;
+}
+
+static void
+req_pool_obj_init(__rte_unused struct rte_mempool *mp,
+ __rte_unused void *opaque, void *obj,
+ __rte_unused unsigned int obj_idx)
+{
+ softreq_init(obj, rte_mempool_virt2iova(obj));
+}
+
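+/*
+ * Create the per-queue-pair softreq pool. The object count is rounded up
+ * to a multiple of 64 (the per-lcore cache size) and every object is
+ * pre-initialized with its own IOVA.
+ */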
+struct rte_mempool *
+nitrox_sym_req_pool_create(struct rte_cryptodev *cdev, uint32_t nobjs,
+ uint16_t qp_id, int socket_id)
+{
+ char softreq_pool_name[RTE_RING_NAMESIZE];
+ struct rte_mempool *mp;
+
+ snprintf(softreq_pool_name, RTE_RING_NAMESIZE, "%s_sr_%d",
+ cdev->data->name, qp_id);
+ mp = rte_mempool_create(softreq_pool_name,
+ RTE_ALIGN_MUL_CEIL(nobjs, 64),
+ sizeof(struct nitrox_softreq),
+ 64, 0, NULL, NULL, req_pool_obj_init, NULL,
+ socket_id, 0);
+ if (unlikely(!mp))
+ NITROX_LOG(ERR, "Failed to create req pool, qid %d, err %d\n",
+ qp_id, rte_errno);
+
+ return mp;
+}
+
+void
+nitrox_sym_req_pool_free(struct rte_mempool *mp)
+{
+ rte_mempool_free(mp);
+}
diff --git a/src/spdk/dpdk/drivers/crypto/nitrox/nitrox_sym_reqmgr.h b/src/spdk/dpdk/drivers/crypto/nitrox/nitrox_sym_reqmgr.h
new file mode 100644
index 000000000..fa2637bdb
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/nitrox/nitrox_sym_reqmgr.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef _NITROX_SYM_REQMGR_H_
+#define _NITROX_SYM_REQMGR_H_
+
+#include "nitrox_sym_ctx.h"
+
+struct nitrox_qp;
+struct nitrox_softreq;
+
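+/*
+ * Typical flow (sketch): take a softreq object from the pool created by
+ * nitrox_sym_req_pool_create(), build the instruction via
+ * nitrox_process_se_req(), copy it to the command queue using the
+ * pointer from nitrox_sym_instr_addr(), then poll nitrox_check_se_req()
+ * until it no longer returns -EAGAIN.
+ */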
+int nitrox_process_se_req(uint16_t qno, struct rte_crypto_op *op,
+ struct nitrox_crypto_ctx *ctx,
+ struct nitrox_softreq *sr);
+int nitrox_check_se_req(struct nitrox_softreq *sr, struct rte_crypto_op **op);
+void *nitrox_sym_instr_addr(struct nitrox_softreq *sr);
+struct rte_mempool *nitrox_sym_req_pool_create(struct rte_cryptodev *cdev,
+ uint32_t nobjs, uint16_t qp_id,
+ int socket_id);
+void nitrox_sym_req_pool_free(struct rte_mempool *mp);
+
+#endif /* _NITROX_SYM_REQMGR_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/nitrox/rte_pmd_nitrox_version.map b/src/spdk/dpdk/drivers/crypto/nitrox/rte_pmd_nitrox_version.map
new file mode 100644
index 000000000..f9f17e4f6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/nitrox/rte_pmd_nitrox_version.map
@@ -0,0 +1,3 @@
+DPDK_20.0 {
+ local: *;
+};