path: root/src/spdk/dpdk/drivers/crypto/virtio
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 18:24:20 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 18:24:20 +0000
commit     483eb2f56657e8e7f419ab1a4fab8dce9ade8609 (patch)
tree       e5d88d25d870d5dedacb6bbdbe2a966086a0a5cf /src/spdk/dpdk/drivers/crypto/virtio
parent     Initial commit. (diff)
Adding upstream version 14.2.21.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/spdk/dpdk/drivers/crypto/virtio')
-rw-r--r--  src/spdk/dpdk/drivers/crypto/virtio/Makefile                          |   35
-rw-r--r--  src/spdk/dpdk/drivers/crypto/virtio/meson.build                       |    8
-rw-r--r--  src/spdk/dpdk/drivers/crypto/virtio/rte_pmd_virtio_crypto_version.map |    3
-rw-r--r--  src/spdk/dpdk/drivers/crypto/virtio/virtio_crypto_algs.h              |   28
-rw-r--r--  src/spdk/dpdk/drivers/crypto/virtio/virtio_crypto_capabilities.h      |   51
-rw-r--r--  src/spdk/dpdk/drivers/crypto/virtio/virtio_cryptodev.c                | 1505
-rw-r--r--  src/spdk/dpdk/drivers/crypto/virtio/virtio_cryptodev.h                |   64
-rw-r--r--  src/spdk/dpdk/drivers/crypto/virtio/virtio_logs.h                     |   91
-rw-r--r--  src/spdk/dpdk/drivers/crypto/virtio/virtio_pci.c                      |  462
-rw-r--r--  src/spdk/dpdk/drivers/crypto/virtio/virtio_pci.h                      |  253
-rw-r--r--  src/spdk/dpdk/drivers/crypto/virtio/virtio_ring.h                     |  137
-rw-r--r--  src/spdk/dpdk/drivers/crypto/virtio/virtio_rxtx.c                     |  527
-rw-r--r--  src/spdk/dpdk/drivers/crypto/virtio/virtqueue.c                       |   43
-rw-r--r--  src/spdk/dpdk/drivers/crypto/virtio/virtqueue.h                       |  171
14 files changed, 3378 insertions, 0 deletions
diff --git a/src/spdk/dpdk/drivers/crypto/virtio/Makefile b/src/spdk/dpdk/drivers/crypto/virtio/Makefile
new file mode 100644
index 00000000..be7b828f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/virtio/Makefile
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_virtio_crypto.a
+
+#
+# include virtio_crypto.h
+#
+CFLAGS += -I$(RTE_SDK)/lib/librte_vhost
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+EXPORT_MAP := rte_pmd_virtio_crypto_version.map
+
+LIBABIVER := 1
+
+#
+# all sources are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO) += virtqueue.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO) += virtio_pci.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO) += virtio_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO) += virtio_cryptodev.c
+
+# this lib depends upon:
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_pci -lrte_bus_pci
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/crypto/virtio/meson.build b/src/spdk/dpdk/drivers/crypto/virtio/meson.build
new file mode 100644
index 00000000..b15b3f9f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/virtio/meson.build
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+
+includes += include_directories('../../../lib/librte_vhost')
+deps += 'bus_pci'
+name = 'virtio_crypto'
+sources = files('virtio_cryptodev.c', 'virtio_pci.c',
+ 'virtio_rxtx.c', 'virtqueue.c')
diff --git a/src/spdk/dpdk/drivers/crypto/virtio/rte_pmd_virtio_crypto_version.map b/src/spdk/dpdk/drivers/crypto/virtio/rte_pmd_virtio_crypto_version.map
new file mode 100644
index 00000000..de8e412f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/virtio/rte_pmd_virtio_crypto_version.map
@@ -0,0 +1,3 @@
+DPDK_18.05 {
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/crypto/virtio/virtio_crypto_algs.h b/src/spdk/dpdk/drivers/crypto/virtio/virtio_crypto_algs.h
new file mode 100644
index 00000000..4c44af37
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/virtio/virtio_crypto_algs.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#ifndef _VIRTIO_CRYPTO_ALGS_H_
+#define _VIRTIO_CRYPTO_ALGS_H_
+
+#include <rte_memory.h>
+
+#include "virtio_crypto.h"
+
+struct virtio_crypto_session {
+ uint64_t session_id;
+
+ struct {
+ uint16_t offset;
+ uint16_t length;
+ } iv;
+
+ struct {
+ uint32_t length;
+ phys_addr_t phys_addr;
+ } aad;
+
+ struct virtio_crypto_op_ctrl_req ctrl;
+};
+
+#endif /* _VIRTIO_CRYPTO_ALGS_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/virtio/virtio_crypto_capabilities.h b/src/spdk/dpdk/drivers/crypto/virtio/virtio_crypto_capabilities.h
new file mode 100644
index 00000000..03c30dee
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/virtio/virtio_crypto_capabilities.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#ifndef _VIRTIO_CRYPTO_CAPABILITIES_H_
+#define _VIRTIO_CRYPTO_CAPABILITIES_H_
+
+#define VIRTIO_SYM_CAPABILITIES \
+ { /* SHA1 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 1, \
+ .max = 64, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 1, \
+ .max = 20, \
+ .increment = 1 \
+ }, \
+ .iv_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES CBC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }
+
+#endif /* _VIRTIO_CRYPTO_CAPABILITIES_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/virtio/virtio_cryptodev.c b/src/spdk/dpdk/drivers/crypto/virtio/virtio_cryptodev.c
new file mode 100644
index 00000000..568b5a40
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/virtio/virtio_cryptodev.c
@@ -0,0 +1,1505 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+#include <stdbool.h>
+#include <unistd.h>
+
+#include <rte_common.h>
+#include <rte_errno.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_eal.h>
+
+#include "virtio_cryptodev.h"
+#include "virtqueue.h"
+#include "virtio_crypto_algs.h"
+#include "virtio_crypto_capabilities.h"
+
+int virtio_crypto_logtype_init;
+int virtio_crypto_logtype_session;
+int virtio_crypto_logtype_rx;
+int virtio_crypto_logtype_tx;
+int virtio_crypto_logtype_driver;
+
+static int virtio_crypto_dev_configure(struct rte_cryptodev *dev,
+ struct rte_cryptodev_config *config);
+static int virtio_crypto_dev_start(struct rte_cryptodev *dev);
+static void virtio_crypto_dev_stop(struct rte_cryptodev *dev);
+static int virtio_crypto_dev_close(struct rte_cryptodev *dev);
+static void virtio_crypto_dev_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info);
+static void virtio_crypto_dev_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats);
+static void virtio_crypto_dev_stats_reset(struct rte_cryptodev *dev);
+static int virtio_crypto_qp_setup(struct rte_cryptodev *dev,
+ uint16_t queue_pair_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id,
+ struct rte_mempool *session_pool);
+static int virtio_crypto_qp_release(struct rte_cryptodev *dev,
+ uint16_t queue_pair_id);
+static void virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev);
+static unsigned int virtio_crypto_sym_get_session_private_size(
+ struct rte_cryptodev *dev);
+static void virtio_crypto_sym_clear_session(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess);
+static int virtio_crypto_sym_configure_session(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *session,
+ struct rte_mempool *mp);
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static const struct rte_pci_id pci_id_virtio_crypto_map[] = {
+ { RTE_PCI_DEVICE(VIRTIO_CRYPTO_PCI_VENDORID,
+ VIRTIO_CRYPTO_PCI_DEVICEID) },
+ { .vendor_id = 0, /* sentinel */ },
+};
+
+static const struct rte_cryptodev_capabilities virtio_capabilities[] = {
+ VIRTIO_SYM_CAPABILITIES,
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+uint8_t cryptodev_virtio_driver_id;
+
+#define NUM_ENTRY_SYM_CREATE_SESSION 4
+
+static int
+virtio_crypto_send_command(struct virtqueue *vq,
+ struct virtio_crypto_op_ctrl_req *ctrl, uint8_t *cipher_key,
+ uint8_t *auth_key, struct virtio_crypto_session *session)
+{
+ uint8_t idx = 0;
+ uint8_t needed = 1;
+ uint32_t head = 0;
+ uint32_t len_cipher_key = 0;
+ uint32_t len_auth_key = 0;
+ uint32_t len_ctrl_req = sizeof(struct virtio_crypto_op_ctrl_req);
+ uint32_t len_session_input = sizeof(struct virtio_crypto_session_input);
+ uint32_t len_total = 0;
+ uint32_t input_offset = 0;
+ void *virt_addr_started = NULL;
+ phys_addr_t phys_addr_started;
+ struct vring_desc *desc;
+ uint32_t desc_offset;
+ struct virtio_crypto_session_input *input;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (session == NULL) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("session is NULL.");
+ return -EINVAL;
+ }
+	/* cipher-only sessions are supported; in that case auth_key is NULL, but cipher_key must not be */
+ if (!cipher_key) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("cipher key is NULL.");
+ return -EINVAL;
+ }
+
+ head = vq->vq_desc_head_idx;
+ VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_desc_head_idx = %d, vq = %p",
+ head, vq);
+
+ if (vq->vq_free_cnt < needed) {
+		VIRTIO_CRYPTO_SESSION_LOG_ERR("Not enough free entries");
+ return -ENOSPC;
+ }
+
+ /* calculate the length of cipher key */
+ if (cipher_key) {
+ switch (ctrl->u.sym_create_session.op_type) {
+ case VIRTIO_CRYPTO_SYM_OP_CIPHER:
+ len_cipher_key
+ = ctrl->u.sym_create_session.u.cipher
+ .para.keylen;
+ break;
+ case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
+ len_cipher_key
+ = ctrl->u.sym_create_session.u.chain
+ .para.cipher_param.keylen;
+ break;
+ default:
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("invalid op type");
+ return -EINVAL;
+ }
+ }
+
+ /* calculate the length of auth key */
+ if (auth_key) {
+ len_auth_key =
+ ctrl->u.sym_create_session.u.chain.para.u.mac_param
+ .auth_key_len;
+ }
+
+ /*
+ * malloc memory to store indirect vring_desc entries, including
+ * ctrl request, cipher key, auth key, session input and desc vring
+ */
+ desc_offset = len_ctrl_req + len_cipher_key + len_auth_key
+ + len_session_input;
+ virt_addr_started = rte_malloc(NULL,
+ desc_offset + NUM_ENTRY_SYM_CREATE_SESSION
+ * sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE);
+ if (virt_addr_started == NULL) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("not enough heap memory");
+ return -ENOSPC;
+ }
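+	/* the request is handed to the device by IOVA, so record the bus address of this buffer */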
+ phys_addr_started = rte_malloc_virt2iova(virt_addr_started);
+
+ /* address to store indirect vring desc entries */
+ desc = (struct vring_desc *)
+ ((uint8_t *)virt_addr_started + desc_offset);
+
+ /* ctrl req part */
+ memcpy(virt_addr_started, ctrl, len_ctrl_req);
+ desc[idx].addr = phys_addr_started;
+ desc[idx].len = len_ctrl_req;
+ desc[idx].flags = VRING_DESC_F_NEXT;
+ desc[idx].next = idx + 1;
+ idx++;
+ len_total += len_ctrl_req;
+ input_offset += len_ctrl_req;
+
+ /* cipher key part */
+ if (len_cipher_key > 0) {
+ memcpy((uint8_t *)virt_addr_started + len_total,
+ cipher_key, len_cipher_key);
+
+ desc[idx].addr = phys_addr_started + len_total;
+ desc[idx].len = len_cipher_key;
+ desc[idx].flags = VRING_DESC_F_NEXT;
+ desc[idx].next = idx + 1;
+ idx++;
+ len_total += len_cipher_key;
+ input_offset += len_cipher_key;
+ }
+
+ /* auth key part */
+ if (len_auth_key > 0) {
+ memcpy((uint8_t *)virt_addr_started + len_total,
+ auth_key, len_auth_key);
+
+ desc[idx].addr = phys_addr_started + len_total;
+ desc[idx].len = len_auth_key;
+ desc[idx].flags = VRING_DESC_F_NEXT;
+ desc[idx].next = idx + 1;
+ idx++;
+ len_total += len_auth_key;
+ input_offset += len_auth_key;
+ }
+
+ /* input part */
+ input = (struct virtio_crypto_session_input *)
+ ((uint8_t *)virt_addr_started + input_offset);
+ input->status = VIRTIO_CRYPTO_ERR;
+ input->session_id = ~0ULL;
+ desc[idx].addr = phys_addr_started + len_total;
+ desc[idx].len = len_session_input;
+ desc[idx].flags = VRING_DESC_F_WRITE;
+ idx++;
+
+ /* use a single desc entry */
+ vq->vq_ring.desc[head].addr = phys_addr_started + desc_offset;
+ vq->vq_ring.desc[head].len = idx * sizeof(struct vring_desc);
+ vq->vq_ring.desc[head].flags = VRING_DESC_F_INDIRECT;
+ vq->vq_free_cnt--;
+
+ vq->vq_desc_head_idx = vq->vq_ring.desc[head].next;
+
+ vq_update_avail_ring(vq, head);
+ vq_update_avail_idx(vq);
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_queue_index = %d",
+ vq->vq_queue_index);
+
+ virtqueue_notify(vq);
+
+ rte_rmb();
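+	/* the control queue is handled synchronously: poll until the device advances the used index */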
+ while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {
+ rte_rmb();
+ usleep(100);
+ }
+
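+	/* retire the completed request: walk its descriptor chain and return the entries to the free list */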
+ while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) {
+ uint32_t idx, desc_idx, used_idx;
+ struct vring_used_elem *uep;
+
+ used_idx = (uint32_t)(vq->vq_used_cons_idx
+ & (vq->vq_nentries - 1));
+ uep = &vq->vq_ring.used->ring[used_idx];
+ idx = (uint32_t) uep->id;
+ desc_idx = idx;
+
+ while (vq->vq_ring.desc[desc_idx].flags & VRING_DESC_F_NEXT) {
+ desc_idx = vq->vq_ring.desc[desc_idx].next;
+ vq->vq_free_cnt++;
+ }
+
+ vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
+ vq->vq_desc_head_idx = idx;
+
+ vq->vq_used_cons_idx++;
+ vq->vq_free_cnt++;
+ }
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_free_cnt=%d\n"
+ "vq->vq_desc_head_idx=%d",
+ vq->vq_free_cnt, vq->vq_desc_head_idx);
+
+ /* get the result */
+ if (input->status != VIRTIO_CRYPTO_OK) {
+		VIRTIO_CRYPTO_SESSION_LOG_ERR("Backend returned an error: "
+ "status=%u, session_id=%" PRIu64 "",
+ input->status, input->session_id);
+ rte_free(virt_addr_started);
+ ret = -1;
+ } else {
+ session->session_id = input->session_id;
+
+		VIRTIO_CRYPTO_SESSION_LOG_INFO("Session created successfully, "
+ "session_id=%" PRIu64 "", input->session_id);
+ rte_free(virt_addr_started);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+void
+virtio_crypto_queue_release(struct virtqueue *vq)
+{
+ struct virtio_crypto_hw *hw;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (vq) {
+ hw = vq->hw;
+ /* Select and deactivate the queue */
+ VTPCI_OPS(hw)->del_queue(hw, vq);
+
+ rte_memzone_free(vq->mz);
+ rte_mempool_free(vq->mpool);
+ rte_free(vq);
+ }
+}
+
+#define MPOOL_MAX_NAME_SZ 32
+
+int
+virtio_crypto_queue_setup(struct rte_cryptodev *dev,
+ int queue_type,
+ uint16_t vtpci_queue_idx,
+ uint16_t nb_desc,
+ int socket_id,
+ struct virtqueue **pvq)
+{
+ char vq_name[VIRTQUEUE_MAX_NAME_SZ];
+ char mpool_name[MPOOL_MAX_NAME_SZ];
+ const struct rte_memzone *mz;
+ unsigned int vq_size, size;
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+ struct virtqueue *vq = NULL;
+ uint32_t i = 0;
+ uint32_t j;
+
+ PMD_INIT_FUNC_TRACE();
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("setting up queue: %u", vtpci_queue_idx);
+
+	/*
+	 * Read the virtqueue size from the Queue Size field.
+	 * It is always a power of 2; a value of 0 means the
+	 * virtqueue does not exist.
+	 */
+ vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx);
+ if (vq_size == 0) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("virtqueue does not exist");
+ return -EINVAL;
+ }
+ VIRTIO_CRYPTO_INIT_LOG_DBG("vq_size: %u", vq_size);
+
+ if (!rte_is_power_of_2(vq_size)) {
+		VIRTIO_CRYPTO_INIT_LOG_ERR("virtqueue size is not a power of 2");
+ return -EINVAL;
+ }
+
+ if (queue_type == VTCRYPTO_DATAQ) {
+ snprintf(vq_name, sizeof(vq_name), "dev%d_dataqueue%d",
+ dev->data->dev_id, vtpci_queue_idx);
+ snprintf(mpool_name, sizeof(mpool_name),
+ "dev%d_dataqueue%d_mpool",
+ dev->data->dev_id, vtpci_queue_idx);
+ } else if (queue_type == VTCRYPTO_CTRLQ) {
+ snprintf(vq_name, sizeof(vq_name), "dev%d_controlqueue",
+ dev->data->dev_id);
+ snprintf(mpool_name, sizeof(mpool_name),
+ "dev%d_controlqueue_mpool",
+ dev->data->dev_id);
+ }
+ size = RTE_ALIGN_CEIL(sizeof(*vq) +
+ vq_size * sizeof(struct vq_desc_extra),
+ RTE_CACHE_LINE_SIZE);
+ vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (vq == NULL) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("Can not allocate virtqueue");
+ return -ENOMEM;
+ }
+
+ if (queue_type == VTCRYPTO_DATAQ) {
+ /* pre-allocate a mempool and use it in the data plane to
+ * improve performance
+ */
+ vq->mpool = rte_mempool_lookup(mpool_name);
+ if (vq->mpool == NULL)
+ vq->mpool = rte_mempool_create(mpool_name,
+ vq_size,
+ sizeof(struct virtio_crypto_op_cookie),
+ RTE_CACHE_LINE_SIZE, 0,
+ NULL, NULL, NULL, NULL, socket_id,
+ 0);
+ if (!vq->mpool) {
+ VIRTIO_CRYPTO_DRV_LOG_ERR("Virtio Crypto PMD "
+ "Cannot create mempool");
+ goto mpool_create_err;
+ }
+ for (i = 0; i < vq_size; i++) {
+ vq->vq_descx[i].cookie =
+ rte_zmalloc("crypto PMD op cookie pointer",
+ sizeof(struct virtio_crypto_op_cookie),
+ RTE_CACHE_LINE_SIZE);
+ if (vq->vq_descx[i].cookie == NULL) {
+ VIRTIO_CRYPTO_DRV_LOG_ERR("Failed to "
+ "alloc mem for cookie");
+ goto cookie_alloc_err;
+ }
+ }
+ }
+
+ vq->hw = hw;
+ vq->dev_id = dev->data->dev_id;
+ vq->vq_queue_index = vtpci_queue_idx;
+ vq->vq_nentries = vq_size;
+
+ /*
+ * Using part of the vring entries is permitted, but the maximum
+ * is vq_size
+ */
+ if (nb_desc == 0 || nb_desc > vq_size)
+ nb_desc = vq_size;
+ vq->vq_free_cnt = nb_desc;
+
+ /*
+ * Reserve a memzone for vring elements
+ */
+ size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
+ vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("%s vring_size: %d, rounded_vring_size: %d",
+ (queue_type == VTCRYPTO_DATAQ) ? "dataq" : "ctrlq",
+ size, vq->vq_ring_size);
+
+ mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
+ socket_id, 0, VIRTIO_PCI_VRING_ALIGN);
+ if (mz == NULL) {
+ if (rte_errno == EEXIST)
+ mz = rte_memzone_lookup(vq_name);
+ if (mz == NULL) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("not enough memory");
+ goto mz_reserve_err;
+ }
+ }
+
+	/*
+	 * The virtio PCI VIRTIO_PCI_QUEUE_PFN register is 32 bit,
+	 * and only accepts a 32 bit page frame number.
+	 * Check that the allocated physical memory does not exceed 16TB.
+	 */
+ if ((mz->phys_addr + vq->vq_ring_size - 1)
+ >> (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("vring address shouldn't be "
+ "above 16TB!");
+ goto vring_addr_err;
+ }
+
+	memset(mz->addr, 0, mz->len);
+ vq->mz = mz;
+ vq->vq_ring_mem = mz->phys_addr;
+ vq->vq_ring_virt_mem = mz->addr;
+ VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_ring_mem(physical): 0x%"PRIx64,
+ (uint64_t)mz->phys_addr);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_ring_virt_mem: 0x%"PRIx64,
+ (uint64_t)(uintptr_t)mz->addr);
+
+ *pvq = vq;
+
+ return 0;
+
+vring_addr_err:
+ rte_memzone_free(mz);
+mz_reserve_err:
+cookie_alloc_err:
+ rte_mempool_free(vq->mpool);
+ if (i != 0) {
+ for (j = 0; j < i; j++)
+ rte_free(vq->vq_descx[j].cookie);
+ }
+mpool_create_err:
+ rte_free(vq);
+ return -ENOMEM;
+}
+
+static int
+virtio_crypto_ctrlq_setup(struct rte_cryptodev *dev, uint16_t queue_idx)
+{
+ int ret;
+ struct virtqueue *vq;
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ /* if virtio device has started, do not touch the virtqueues */
+ if (dev->data->dev_started)
+ return 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = virtio_crypto_queue_setup(dev, VTCRYPTO_CTRLQ, queue_idx,
+ 0, SOCKET_ID_ANY, &vq);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("control vq initialization failed");
+ return ret;
+ }
+
+ hw->cvq = vq;
+
+ return 0;
+}
+
+static void
+virtio_crypto_free_queues(struct rte_cryptodev *dev)
+{
+ unsigned int i;
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* control queue release */
+ virtio_crypto_queue_release(hw->cvq);
+
+ /* data queue release */
+ for (i = 0; i < hw->max_dataqueues; i++)
+ virtio_crypto_queue_release(dev->data->queue_pairs[i]);
+}
+
+static int
+virtio_crypto_dev_close(struct rte_cryptodev *dev __rte_unused)
+{
+ return 0;
+}
+
+/*
+ * dev_ops for virtio, bare necessities for basic operation
+ */
+static struct rte_cryptodev_ops virtio_crypto_dev_ops = {
+ /* Device related operations */
+ .dev_configure = virtio_crypto_dev_configure,
+ .dev_start = virtio_crypto_dev_start,
+ .dev_stop = virtio_crypto_dev_stop,
+ .dev_close = virtio_crypto_dev_close,
+ .dev_infos_get = virtio_crypto_dev_info_get,
+
+ .stats_get = virtio_crypto_dev_stats_get,
+ .stats_reset = virtio_crypto_dev_stats_reset,
+
+ .queue_pair_setup = virtio_crypto_qp_setup,
+ .queue_pair_release = virtio_crypto_qp_release,
+ .queue_pair_count = NULL,
+
+ /* Crypto related operations */
+ .sym_session_get_size = virtio_crypto_sym_get_session_private_size,
+ .sym_session_configure = virtio_crypto_sym_configure_session,
+ .sym_session_clear = virtio_crypto_sym_clear_session
+};
+
+static void
+virtio_crypto_update_stats(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ unsigned int i;
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (stats == NULL) {
+ VIRTIO_CRYPTO_DRV_LOG_ERR("invalid pointer");
+ return;
+ }
+
+ for (i = 0; i < hw->max_dataqueues; i++) {
+ const struct virtqueue *data_queue
+ = dev->data->queue_pairs[i];
+ if (data_queue == NULL)
+ continue;
+
+ stats->enqueued_count += data_queue->packets_sent_total;
+ stats->enqueue_err_count += data_queue->packets_sent_failed;
+
+ stats->dequeued_count += data_queue->packets_received_total;
+ stats->dequeue_err_count
+ += data_queue->packets_received_failed;
+ }
+}
+
+static void
+virtio_crypto_dev_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ virtio_crypto_update_stats(dev, stats);
+}
+
+static void
+virtio_crypto_dev_stats_reset(struct rte_cryptodev *dev)
+{
+ unsigned int i;
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < hw->max_dataqueues; i++) {
+ struct virtqueue *data_queue = dev->data->queue_pairs[i];
+ if (data_queue == NULL)
+ continue;
+
+ data_queue->packets_sent_total = 0;
+ data_queue->packets_sent_failed = 0;
+
+ data_queue->packets_received_total = 0;
+ data_queue->packets_received_failed = 0;
+ }
+}
+
+static int
+virtio_crypto_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id,
+ struct rte_mempool *session_pool __rte_unused)
+{
+ int ret;
+ struct virtqueue *vq;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* if virtio dev is started, do not touch the virtqueues */
+ if (dev->data->dev_started)
+ return 0;
+
+ ret = virtio_crypto_queue_setup(dev, VTCRYPTO_DATAQ, queue_pair_id,
+ qp_conf->nb_descriptors, socket_id, &vq);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR(
+ "virtio crypto data queue initialization failed\n");
+ return ret;
+ }
+
+ dev->data->queue_pairs[queue_pair_id] = vq;
+
+ return 0;
+}
+
+static int
+virtio_crypto_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
+{
+ struct virtqueue *vq
+ = (struct virtqueue *)dev->data->queue_pairs[queue_pair_id];
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (vq == NULL) {
+ VIRTIO_CRYPTO_DRV_LOG_DBG("vq already freed");
+ return 0;
+ }
+
+ virtio_crypto_queue_release(vq);
+ return 0;
+}
+
+static int
+virtio_negotiate_features(struct virtio_crypto_hw *hw, uint64_t req_features)
+{
+ uint64_t host_features;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Prepare guest_features: feature that driver wants to support */
+ VIRTIO_CRYPTO_INIT_LOG_DBG("guest_features before negotiate = %" PRIx64,
+ req_features);
+
+ /* Read device(host) feature bits */
+ host_features = VTPCI_OPS(hw)->get_features(hw);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("host_features before negotiate = %" PRIx64,
+ host_features);
+
+	/*
+	 * Negotiate features: the subset of device feature bits that the
+	 * driver also supports is written back as the guest feature bits.
+	 */
+ hw->guest_features = req_features;
+ hw->guest_features = vtpci_cryptodev_negotiate_features(hw,
+ host_features);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("features after negotiate = %" PRIx64,
+ hw->guest_features);
+
+ if (hw->modern) {
+ if (!vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR(
+ "VIRTIO_F_VERSION_1 features is not enabled.");
+ return -1;
+ }
+ vtpci_cryptodev_set_status(hw,
+ VIRTIO_CONFIG_STATUS_FEATURES_OK);
+ if (!(vtpci_cryptodev_get_status(hw) &
+ VIRTIO_CONFIG_STATUS_FEATURES_OK)) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("failed to set FEATURES_OK "
+ "status!");
+ return -1;
+ }
+ }
+
+ hw->req_guest_features = req_features;
+
+ return 0;
+}
+
+/* reset device and renegotiate features if needed */
+static int
+virtio_crypto_init_device(struct rte_cryptodev *cryptodev,
+ uint64_t req_features)
+{
+ struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
+ struct virtio_crypto_config local_config;
+ struct virtio_crypto_config *config = &local_config;
+
+ PMD_INIT_FUNC_TRACE();
+
+	/* Reset the device, although this is not strictly necessary at startup */
+ vtpci_cryptodev_reset(hw);
+
+ /* Tell the host we've noticed this device. */
+ vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
+
+	/* Tell the host we know how to drive the device. */
+ vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
+ if (virtio_negotiate_features(hw, req_features) < 0)
+ return -1;
+
+ /* Get status of the device */
+ vtpci_read_cryptodev_config(hw,
+ offsetof(struct virtio_crypto_config, status),
+ &config->status, sizeof(config->status));
+ if (config->status != VIRTIO_CRYPTO_S_HW_READY) {
+ VIRTIO_CRYPTO_DRV_LOG_ERR("accelerator hardware is "
+ "not ready");
+ return -1;
+ }
+
+ /* Get number of data queues */
+ vtpci_read_cryptodev_config(hw,
+ offsetof(struct virtio_crypto_config, max_dataqueues),
+ &config->max_dataqueues,
+ sizeof(config->max_dataqueues));
+ hw->max_dataqueues = config->max_dataqueues;
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("hw->max_dataqueues=%d",
+ hw->max_dataqueues);
+
+ return 0;
+}
+
+/*
+ * This function does the real work behind the PCI probe callback.
+ * It returns 0 on success.
+ */
+static int
+crypto_virtio_create(const char *name, struct rte_pci_device *pci_dev,
+ struct rte_cryptodev_pmd_init_params *init_params)
+{
+ struct rte_cryptodev *cryptodev;
+ struct virtio_crypto_hw *hw;
+
+ PMD_INIT_FUNC_TRACE();
+
+ cryptodev = rte_cryptodev_pmd_create(name, &pci_dev->device,
+ init_params);
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ cryptodev->driver_id = cryptodev_virtio_driver_id;
+ cryptodev->dev_ops = &virtio_crypto_dev_ops;
+
+ cryptodev->enqueue_burst = virtio_crypto_pkt_tx_burst;
+ cryptodev->dequeue_burst = virtio_crypto_pkt_rx_burst;
+
+ cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+
+ hw = cryptodev->data->dev_private;
+ hw->dev_id = cryptodev->data->dev_id;
+ hw->virtio_dev_capabilities = virtio_capabilities;
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("dev %d vendorID=0x%x deviceID=0x%x",
+ cryptodev->data->dev_id, pci_dev->id.vendor_id,
+ pci_dev->id.device_id);
+
+ /* pci device init */
+ if (vtpci_cryptodev_init(pci_dev, hw))
+ return -1;
+
+ if (virtio_crypto_init_device(cryptodev,
+ VIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0)
+ return -1;
+
+ return 0;
+}
+
+static int
+virtio_crypto_dev_uninit(struct rte_cryptodev *cryptodev)
+{
+ struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+ return -EPERM;
+
+ if (cryptodev->data->dev_started) {
+ virtio_crypto_dev_stop(cryptodev);
+ virtio_crypto_dev_close(cryptodev);
+ }
+
+ cryptodev->dev_ops = NULL;
+ cryptodev->enqueue_burst = NULL;
+ cryptodev->dequeue_burst = NULL;
+
+ /* release control queue */
+ virtio_crypto_queue_release(hw->cvq);
+
+ rte_free(cryptodev->data);
+ cryptodev->data = NULL;
+
+ VIRTIO_CRYPTO_DRV_LOG_INFO("dev_uninit completed");
+
+ return 0;
+}
+
+static int
+virtio_crypto_dev_configure(struct rte_cryptodev *cryptodev,
+ struct rte_cryptodev_config *config __rte_unused)
+{
+ struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (virtio_crypto_init_device(cryptodev,
+ VIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0)
+ return -1;
+
+	/* setup the control queue:
+	 * queues [0, 1, ..., (config->max_dataqueues - 1)] are data queues,
+	 * queue index config->max_dataqueues is the control queue
+	 */
+ if (virtio_crypto_ctrlq_setup(cryptodev, hw->max_dataqueues) < 0) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("control queue setup error");
+ return -1;
+ }
+ virtio_crypto_ctrlq_start(cryptodev);
+
+ return 0;
+}
+
+static void
+virtio_crypto_dev_stop(struct rte_cryptodev *dev)
+{
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+ VIRTIO_CRYPTO_DRV_LOG_DBG("virtio_dev_stop");
+
+ vtpci_cryptodev_reset(hw);
+
+ virtio_crypto_dev_free_mbufs(dev);
+ virtio_crypto_free_queues(dev);
+
+ dev->data->dev_started = 0;
+}
+
+static int
+virtio_crypto_dev_start(struct rte_cryptodev *dev)
+{
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ if (dev->data->dev_started)
+ return 0;
+
+ /* Do final configuration before queue engine starts */
+ virtio_crypto_dataq_start(dev);
+ vtpci_cryptodev_reinit_complete(hw);
+
+ dev->data->dev_started = 1;
+
+ return 0;
+}
+
+static void
+virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev)
+{
+ uint32_t i;
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ for (i = 0; i < hw->max_dataqueues; i++) {
+ VIRTIO_CRYPTO_INIT_LOG_DBG("Before freeing dataq[%d] used "
+ "and unused buf", i);
+ VIRTQUEUE_DUMP((struct virtqueue *)
+ dev->data->queue_pairs[i]);
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("queue_pairs[%d]=%p",
+ i, dev->data->queue_pairs[i]);
+
+ virtqueue_detatch_unused(dev->data->queue_pairs[i]);
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("After freeing dataq[%d] used and "
+ "unused buf", i);
+ VIRTQUEUE_DUMP(
+ (struct virtqueue *)dev->data->queue_pairs[i]);
+ }
+}
+
+static unsigned int
+virtio_crypto_sym_get_session_private_size(
+ struct rte_cryptodev *dev __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return RTE_ALIGN_CEIL(sizeof(struct virtio_crypto_session), 16);
+}
+
+static int
+virtio_crypto_check_sym_session_paras(
+ struct rte_cryptodev *dev)
+{
+ struct virtio_crypto_hw *hw;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (unlikely(dev == NULL)) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("dev is NULL");
+ return -1;
+ }
+ if (unlikely(dev->data == NULL)) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("dev->data is NULL");
+ return -1;
+ }
+ hw = dev->data->dev_private;
+ if (unlikely(hw == NULL)) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("hw is NULL");
+ return -1;
+ }
+ if (unlikely(hw->cvq == NULL)) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("vq is NULL");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+virtio_crypto_check_sym_clear_session_paras(
+ struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ if (sess == NULL) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("sym_session is NULL");
+ return -1;
+ }
+
+ return virtio_crypto_check_sym_session_paras(dev);
+}
+
+#define NUM_ENTRY_SYM_CLEAR_SESSION 2
+
+static void
+virtio_crypto_sym_clear_session(
+ struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ struct virtio_crypto_hw *hw;
+ struct virtqueue *vq;
+ struct virtio_crypto_session *session;
+ struct virtio_crypto_op_ctrl_req *ctrl;
+ struct vring_desc *desc;
+ uint8_t *status;
+ uint8_t needed = 1;
+ uint32_t head;
+ uint8_t *malloc_virt_addr;
+ uint64_t malloc_phys_addr;
+ uint8_t len_inhdr = sizeof(struct virtio_crypto_inhdr);
+ uint32_t len_op_ctrl_req = sizeof(struct virtio_crypto_op_ctrl_req);
+ uint32_t desc_offset = len_op_ctrl_req + len_inhdr;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (virtio_crypto_check_sym_clear_session_paras(dev, sess) < 0)
+ return;
+
+ hw = dev->data->dev_private;
+ vq = hw->cvq;
+ session = (struct virtio_crypto_session *)get_sym_session_private_data(
+ sess, cryptodev_virtio_driver_id);
+ if (session == NULL) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("Invalid session parameter");
+ return;
+ }
+
+ VIRTIO_CRYPTO_SESSION_LOG_INFO("vq->vq_desc_head_idx = %d, "
+ "vq = %p", vq->vq_desc_head_idx, vq);
+
+ if (vq->vq_free_cnt < needed) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "vq->vq_free_cnt = %d is less than %d, "
+ "not enough", vq->vq_free_cnt, needed);
+ return;
+ }
+
+ /*
+ * malloc memory to store information of ctrl request op,
+ * returned status and desc vring
+ */
+ malloc_virt_addr = rte_malloc(NULL, len_op_ctrl_req + len_inhdr
+ + NUM_ENTRY_SYM_CLEAR_SESSION
+ * sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE);
+ if (malloc_virt_addr == NULL) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("not enough heap room");
+ return;
+ }
+ malloc_phys_addr = rte_malloc_virt2iova(malloc_virt_addr);
+
+ /* assign ctrl request op part */
+ ctrl = (struct virtio_crypto_op_ctrl_req *)malloc_virt_addr;
+ ctrl->header.opcode = VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION;
+ /* default data virtqueue is 0 */
+ ctrl->header.queue_id = 0;
+ ctrl->u.destroy_session.session_id = session->session_id;
+
+ /* status part */
+ status = &(((struct virtio_crypto_inhdr *)
+ ((uint8_t *)malloc_virt_addr + len_op_ctrl_req))->status);
+ *status = VIRTIO_CRYPTO_ERR;
+
+ /* indirect desc vring part */
+ desc = (struct vring_desc *)((uint8_t *)malloc_virt_addr
+ + desc_offset);
+
+ /* ctrl request part */
+ desc[0].addr = malloc_phys_addr;
+ desc[0].len = len_op_ctrl_req;
+ desc[0].flags = VRING_DESC_F_NEXT;
+ desc[0].next = 1;
+
+ /* status part */
+ desc[1].addr = malloc_phys_addr + len_op_ctrl_req;
+ desc[1].len = len_inhdr;
+ desc[1].flags = VRING_DESC_F_WRITE;
+
+ /* use only a single desc entry */
+ head = vq->vq_desc_head_idx;
+ vq->vq_ring.desc[head].flags = VRING_DESC_F_INDIRECT;
+ vq->vq_ring.desc[head].addr = malloc_phys_addr + desc_offset;
+ vq->vq_ring.desc[head].len
+ = NUM_ENTRY_SYM_CLEAR_SESSION
+ * sizeof(struct vring_desc);
+ vq->vq_free_cnt -= needed;
+
+ vq->vq_desc_head_idx = vq->vq_ring.desc[head].next;
+
+ vq_update_avail_ring(vq, head);
+ vq_update_avail_idx(vq);
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_queue_index = %d",
+ vq->vq_queue_index);
+
+ virtqueue_notify(vq);
+
+ rte_rmb();
+ while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {
+ rte_rmb();
+ usleep(100);
+ }
+
+ while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) {
+ uint32_t idx, desc_idx, used_idx;
+ struct vring_used_elem *uep;
+
+ used_idx = (uint32_t)(vq->vq_used_cons_idx
+ & (vq->vq_nentries - 1));
+ uep = &vq->vq_ring.used->ring[used_idx];
+ idx = (uint32_t) uep->id;
+ desc_idx = idx;
+ while (vq->vq_ring.desc[desc_idx].flags
+ & VRING_DESC_F_NEXT) {
+ desc_idx = vq->vq_ring.desc[desc_idx].next;
+ vq->vq_free_cnt++;
+ }
+
+ vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
+ vq->vq_desc_head_idx = idx;
+ vq->vq_used_cons_idx++;
+ vq->vq_free_cnt++;
+ }
+
+ if (*status != VIRTIO_CRYPTO_OK) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("Close session failed "
+ "status=%"PRIu32", session_id=%"PRIu64"",
+ *status, session->session_id);
+ rte_free(malloc_virt_addr);
+ return;
+ }
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_free_cnt=%d\n"
+ "vq->vq_desc_head_idx=%d",
+ vq->vq_free_cnt, vq->vq_desc_head_idx);
+
+	VIRTIO_CRYPTO_SESSION_LOG_INFO("Session %"PRIu64" closed successfully",
+ session->session_id);
+
+ memset(session, 0, sizeof(struct virtio_crypto_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(session);
+ set_sym_session_private_data(sess, cryptodev_virtio_driver_id, NULL);
+ rte_mempool_put(sess_mp, session);
+ rte_free(malloc_virt_addr);
+}
+
+static struct rte_crypto_cipher_xform *
+virtio_crypto_get_cipher_xform(struct rte_crypto_sym_xform *xform)
+{
+ do {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return &xform->cipher;
+
+ xform = xform->next;
+ } while (xform);
+
+ return NULL;
+}
+
+static struct rte_crypto_auth_xform *
+virtio_crypto_get_auth_xform(struct rte_crypto_sym_xform *xform)
+{
+ do {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return &xform->auth;
+
+ xform = xform->next;
+ } while (xform);
+
+ return NULL;
+}
+
+/** Get xform chain order */
+static int
+virtio_crypto_get_chain_order(struct rte_crypto_sym_xform *xform)
+{
+ if (xform == NULL)
+ return -1;
+
+ /* Cipher Only */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next == NULL)
+ return VIRTIO_CRYPTO_CMD_CIPHER;
+
+ /* Authentication Only */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next == NULL)
+ return VIRTIO_CRYPTO_CMD_AUTH;
+
+ /* Authenticate then Cipher */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return VIRTIO_CRYPTO_CMD_HASH_CIPHER;
+
+ /* Cipher then Authenticate */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return VIRTIO_CRYPTO_CMD_CIPHER_HASH;
+
+ return -1;
+}
+
+static int
+virtio_crypto_sym_pad_cipher_param(
+ struct virtio_crypto_cipher_session_para *para,
+ struct rte_crypto_cipher_xform *cipher_xform)
+{
+ switch (cipher_xform->algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ para->algo = VIRTIO_CRYPTO_CIPHER_AES_CBC;
+ break;
+ default:
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("Crypto: Unsupported "
+ "Cipher alg %u", cipher_xform->algo);
+ return -1;
+ }
+
+ para->keylen = cipher_xform->key.length;
+ switch (cipher_xform->op) {
+ case RTE_CRYPTO_CIPHER_OP_ENCRYPT:
+ para->op = VIRTIO_CRYPTO_OP_ENCRYPT;
+ break;
+ case RTE_CRYPTO_CIPHER_OP_DECRYPT:
+ para->op = VIRTIO_CRYPTO_OP_DECRYPT;
+ break;
+ default:
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("Unsupported cipher operation "
+ "parameter");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+virtio_crypto_sym_pad_auth_param(
+ struct virtio_crypto_op_ctrl_req *ctrl,
+ struct rte_crypto_auth_xform *auth_xform)
+{
+ uint32_t *algo;
+ struct virtio_crypto_alg_chain_session_para *para =
+ &(ctrl->u.sym_create_session.u.chain.para);
+
+ switch (ctrl->u.sym_create_session.u.chain.para.hash_mode) {
+ case VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN:
+ algo = &(para->u.hash_param.algo);
+ break;
+ case VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH:
+ algo = &(para->u.mac_param.algo);
+ break;
+ default:
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("Unsupported hash mode %u "
+ "specified",
+ ctrl->u.sym_create_session.u.chain.para.hash_mode);
+ return -1;
+ }
+
+ switch (auth_xform->algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ *algo = VIRTIO_CRYPTO_MAC_HMAC_SHA1;
+ break;
+ default:
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "Crypto: Undefined Hash algo %u specified",
+ auth_xform->algo);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+virtio_crypto_sym_pad_op_ctrl_req(
+ struct virtio_crypto_op_ctrl_req *ctrl,
+ struct rte_crypto_sym_xform *xform, bool is_chainned,
+ uint8_t **cipher_key_data, uint8_t **auth_key_data,
+ struct virtio_crypto_session *session)
+{
+ int ret;
+ struct rte_crypto_auth_xform *auth_xform = NULL;
+ struct rte_crypto_cipher_xform *cipher_xform = NULL;
+
+ /* Get cipher xform from crypto xform chain */
+ cipher_xform = virtio_crypto_get_cipher_xform(xform);
+ if (cipher_xform) {
+ if (cipher_xform->iv.length > VIRTIO_CRYPTO_MAX_IV_SIZE) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "cipher IV size cannot be longer than %u",
+ VIRTIO_CRYPTO_MAX_IV_SIZE);
+ return -1;
+ }
+ if (is_chainned)
+ ret = virtio_crypto_sym_pad_cipher_param(
+ &ctrl->u.sym_create_session.u.chain.para
+ .cipher_param, cipher_xform);
+ else
+ ret = virtio_crypto_sym_pad_cipher_param(
+ &ctrl->u.sym_create_session.u.cipher.para,
+ cipher_xform);
+
+ if (ret < 0) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "pad cipher parameter failed");
+ return -1;
+ }
+
+ *cipher_key_data = cipher_xform->key.data;
+
+ session->iv.offset = cipher_xform->iv.offset;
+ session->iv.length = cipher_xform->iv.length;
+ }
+
+ /* Get auth xform from crypto xform chain */
+ auth_xform = virtio_crypto_get_auth_xform(xform);
+ if (auth_xform) {
+ /* FIXME: support VIRTIO_CRYPTO_SYM_HASH_MODE_NESTED */
+ struct virtio_crypto_alg_chain_session_para *para =
+ &(ctrl->u.sym_create_session.u.chain.para);
+ if (auth_xform->key.length) {
+ para->hash_mode = VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH;
+ para->u.mac_param.auth_key_len =
+ (uint32_t)auth_xform->key.length;
+ para->u.mac_param.hash_result_len =
+ auth_xform->digest_length;
+
+ *auth_key_data = auth_xform->key.data;
+ } else {
+ para->hash_mode = VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN;
+ para->u.hash_param.hash_result_len =
+ auth_xform->digest_length;
+ }
+
+ ret = virtio_crypto_sym_pad_auth_param(ctrl, auth_xform);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("pad auth parameter "
+ "failed");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int
+virtio_crypto_check_sym_configure_session_paras(
+ struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sym_sess,
+ struct rte_mempool *mempool)
+{
+ if (unlikely(xform == NULL) || unlikely(sym_sess == NULL) ||
+ unlikely(mempool == NULL)) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("NULL pointer");
+ return -1;
+ }
+
+ if (virtio_crypto_check_sym_session_paras(dev) < 0)
+ return -1;
+
+ return 0;
+}
+
+static int
+virtio_crypto_sym_configure_session(
+ struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ int ret;
+ struct virtio_crypto_session crypto_sess;
+ void *session_private = &crypto_sess;
+ struct virtio_crypto_session *session;
+ struct virtio_crypto_op_ctrl_req *ctrl_req;
+ enum virtio_crypto_cmd_id cmd_id;
+ uint8_t *cipher_key_data = NULL;
+ uint8_t *auth_key_data = NULL;
+ struct virtio_crypto_hw *hw;
+ struct virtqueue *control_vq;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = virtio_crypto_check_sym_configure_session_paras(dev, xform,
+ sess, mempool);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("Invalid parameters");
+ return ret;
+ }
+
+ if (rte_mempool_get(mempool, &session_private)) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ session = (struct virtio_crypto_session *)session_private;
+ memset(session, 0, sizeof(struct virtio_crypto_session));
+ ctrl_req = &session->ctrl;
+ ctrl_req->header.opcode = VIRTIO_CRYPTO_CIPHER_CREATE_SESSION;
+ /* FIXME: support multiqueue */
+ ctrl_req->header.queue_id = 0;
+
+ hw = dev->data->dev_private;
+ control_vq = hw->cvq;
+
+ cmd_id = virtio_crypto_get_chain_order(xform);
+ if (cmd_id == VIRTIO_CRYPTO_CMD_CIPHER_HASH)
+ ctrl_req->u.sym_create_session.u.chain.para.alg_chain_order
+ = VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH;
+ if (cmd_id == VIRTIO_CRYPTO_CMD_HASH_CIPHER)
+ ctrl_req->u.sym_create_session.u.chain.para.alg_chain_order
+ = VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER;
+
+ switch (cmd_id) {
+ case VIRTIO_CRYPTO_CMD_CIPHER_HASH:
+ case VIRTIO_CRYPTO_CMD_HASH_CIPHER:
+ ctrl_req->u.sym_create_session.op_type
+ = VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING;
+
+ ret = virtio_crypto_sym_pad_op_ctrl_req(ctrl_req,
+ xform, true, &cipher_key_data, &auth_key_data, session);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "padding sym op ctrl req failed");
+ goto error_out;
+ }
+ ret = virtio_crypto_send_command(control_vq, ctrl_req,
+ cipher_key_data, auth_key_data, session);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "create session failed: %d", ret);
+ goto error_out;
+ }
+ break;
+ case VIRTIO_CRYPTO_CMD_CIPHER:
+ ctrl_req->u.sym_create_session.op_type
+ = VIRTIO_CRYPTO_SYM_OP_CIPHER;
+ ret = virtio_crypto_sym_pad_op_ctrl_req(ctrl_req, xform,
+ false, &cipher_key_data, &auth_key_data, session);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "padding sym op ctrl req failed");
+ goto error_out;
+ }
+ ret = virtio_crypto_send_command(control_vq, ctrl_req,
+ cipher_key_data, NULL, session);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "create session failed: %d", ret);
+ goto error_out;
+ }
+ break;
+ default:
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "Unsupported operation chain order parameter");
+ goto error_out;
+ }
+
+ set_sym_session_private_data(sess, dev->driver_id,
+ session_private);
+
+ return 0;
+
+error_out:
+ return -1;
+}
+
+static void
+virtio_crypto_dev_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *info)
+{
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (info != NULL) {
+ info->driver_id = cryptodev_virtio_driver_id;
+ info->feature_flags = dev->feature_flags;
+ info->max_nb_queue_pairs = hw->max_dataqueues;
+		/* No limit on the number of sessions */
+ info->sym.max_nb_sessions = 0;
+ info->capabilities = hw->virtio_dev_capabilities;
+ }
+}
+
+static int
+crypto_virtio_pci_probe(
+ struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ struct rte_cryptodev_pmd_init_params init_params = {
+ .name = "",
+ .socket_id = rte_socket_id(),
+ .private_data_size = sizeof(struct virtio_crypto_hw)
+ };
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+
+ VIRTIO_CRYPTO_DRV_LOG_DBG("Found Crypto device at %02x:%02x.%x",
+ pci_dev->addr.bus,
+ pci_dev->addr.devid,
+ pci_dev->addr.function);
+
+ rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
+
+ return crypto_virtio_create(name, pci_dev, &init_params);
+}
+
+static int
+crypto_virtio_pci_remove(
+ struct rte_pci_device *pci_dev __rte_unused)
+{
+ struct rte_cryptodev *cryptodev;
+ char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+
+ if (pci_dev == NULL)
+ return -EINVAL;
+
+ rte_pci_device_name(&pci_dev->addr, cryptodev_name,
+ sizeof(cryptodev_name));
+
+ cryptodev = rte_cryptodev_pmd_get_named_dev(cryptodev_name);
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ return virtio_crypto_dev_uninit(cryptodev);
+}
+
+static struct rte_pci_driver rte_virtio_crypto_driver = {
+ .id_table = pci_id_virtio_crypto_map,
+ .drv_flags = 0,
+ .probe = crypto_virtio_pci_probe,
+ .remove = crypto_virtio_pci_remove
+};
+
+static struct cryptodev_driver virtio_crypto_drv;
+
+RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_VIRTIO_PMD, rte_virtio_crypto_driver);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(virtio_crypto_drv,
+ rte_virtio_crypto_driver.driver,
+ cryptodev_virtio_driver_id);
+
+RTE_INIT(virtio_crypto_init_log)
+{
+ virtio_crypto_logtype_init = rte_log_register("pmd.crypto.virtio.init");
+ if (virtio_crypto_logtype_init >= 0)
+ rte_log_set_level(virtio_crypto_logtype_init, RTE_LOG_NOTICE);
+
+ virtio_crypto_logtype_session =
+ rte_log_register("pmd.crypto.virtio.session");
+ if (virtio_crypto_logtype_session >= 0)
+ rte_log_set_level(virtio_crypto_logtype_session,
+ RTE_LOG_NOTICE);
+
+ virtio_crypto_logtype_rx = rte_log_register("pmd.crypto.virtio.rx");
+ if (virtio_crypto_logtype_rx >= 0)
+ rte_log_set_level(virtio_crypto_logtype_rx, RTE_LOG_NOTICE);
+
+ virtio_crypto_logtype_tx = rte_log_register("pmd.crypto.virtio.tx");
+ if (virtio_crypto_logtype_tx >= 0)
+ rte_log_set_level(virtio_crypto_logtype_tx, RTE_LOG_NOTICE);
+
+ virtio_crypto_logtype_driver =
+ rte_log_register("pmd.crypto.virtio.driver");
+ if (virtio_crypto_logtype_driver >= 0)
+ rte_log_set_level(virtio_crypto_logtype_driver, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/crypto/virtio/virtio_cryptodev.h b/src/spdk/dpdk/drivers/crypto/virtio/virtio_cryptodev.h
new file mode 100644
index 00000000..0fd7b722
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/virtio/virtio_cryptodev.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#ifndef _VIRTIO_CRYPTODEV_H_
+#define _VIRTIO_CRYPTODEV_H_
+
+#include "virtio_crypto.h"
+#include "virtio_pci.h"
+#include "virtio_ring.h"
+
+/* Features desired/implemented by this driver. */
+#define VIRTIO_CRYPTO_PMD_GUEST_FEATURES (1ULL << VIRTIO_F_VERSION_1)
+
+#define CRYPTODEV_NAME_VIRTIO_PMD crypto_virtio
+
+#define NUM_ENTRY_VIRTIO_CRYPTO_OP 7
+
+#define VIRTIO_CRYPTO_MAX_IV_SIZE 16
+
+extern uint8_t cryptodev_virtio_driver_id;
+
+enum virtio_crypto_cmd_id {
+ VIRTIO_CRYPTO_CMD_CIPHER = 0,
+ VIRTIO_CRYPTO_CMD_AUTH = 1,
+ VIRTIO_CRYPTO_CMD_CIPHER_HASH = 2,
+ VIRTIO_CRYPTO_CMD_HASH_CIPHER = 3
+};
+
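+/*
+ * Per-operation scratch area: the translated data request, its returned
+ * status header, an indirect descriptor table and a copy of the IV.
+ */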
+struct virtio_crypto_op_cookie {
+ struct virtio_crypto_op_data_req data_req;
+ struct virtio_crypto_inhdr inhdr;
+ struct vring_desc desc[NUM_ENTRY_VIRTIO_CRYPTO_OP];
+ uint8_t iv[VIRTIO_CRYPTO_MAX_IV_SIZE];
+};
+
+/*
+ * Control queue function prototype
+ */
+void virtio_crypto_ctrlq_start(struct rte_cryptodev *dev);
+
+/*
+ * Data queue function prototype
+ */
+void virtio_crypto_dataq_start(struct rte_cryptodev *dev);
+
+int virtio_crypto_queue_setup(struct rte_cryptodev *dev,
+ int queue_type,
+ uint16_t vtpci_queue_idx,
+ uint16_t nb_desc,
+ int socket_id,
+ struct virtqueue **pvq);
+
+void virtio_crypto_queue_release(struct virtqueue *vq);
+
+uint16_t virtio_crypto_pkt_tx_burst(void *tx_queue,
+ struct rte_crypto_op **tx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t virtio_crypto_pkt_rx_burst(void *tx_queue,
+ struct rte_crypto_op **tx_pkts,
+ uint16_t nb_pkts);
+
+#endif /* _VIRTIO_CRYPTODEV_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/virtio/virtio_logs.h b/src/spdk/dpdk/drivers/crypto/virtio/virtio_logs.h
new file mode 100644
index 00000000..26a286cf
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/virtio/virtio_logs.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#ifndef _VIRTIO_LOGS_H_
+#define _VIRTIO_LOGS_H_
+
+#include <rte_log.h>
+
+#define PMD_INIT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, RTE_LOGTYPE_PMD, \
+ "PMD: %s(): " fmt "\n", __func__, ##args)
+
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+
+extern int virtio_crypto_logtype_init;
+
+#define VIRTIO_CRYPTO_INIT_LOG_IMPL(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, virtio_crypto_logtype_init, \
+ "INIT: %s(): " fmt "\n", __func__, ##args)
+
+#define VIRTIO_CRYPTO_INIT_LOG_INFO(fmt, args...) \
+ VIRTIO_CRYPTO_INIT_LOG_IMPL(INFO, fmt, ## args)
+
+#define VIRTIO_CRYPTO_INIT_LOG_DBG(fmt, args...) \
+ VIRTIO_CRYPTO_INIT_LOG_IMPL(DEBUG, fmt, ## args)
+
+#define VIRTIO_CRYPTO_INIT_LOG_ERR(fmt, args...) \
+ VIRTIO_CRYPTO_INIT_LOG_IMPL(ERR, fmt, ## args)
+
+extern int virtio_crypto_logtype_session;
+
+#define VIRTIO_CRYPTO_SESSION_LOG_IMPL(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, virtio_crypto_logtype_session, \
+ "SESSION: %s(): " fmt "\n", __func__, ##args)
+
+#define VIRTIO_CRYPTO_SESSION_LOG_INFO(fmt, args...) \
+ VIRTIO_CRYPTO_SESSION_LOG_IMPL(INFO, fmt, ## args)
+
+#define VIRTIO_CRYPTO_SESSION_LOG_DBG(fmt, args...) \
+ VIRTIO_CRYPTO_SESSION_LOG_IMPL(DEBUG, fmt, ## args)
+
+#define VIRTIO_CRYPTO_SESSION_LOG_ERR(fmt, args...) \
+ VIRTIO_CRYPTO_SESSION_LOG_IMPL(ERR, fmt, ## args)
+
+extern int virtio_crypto_logtype_rx;
+
+#define VIRTIO_CRYPTO_RX_LOG_IMPL(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, virtio_crypto_logtype_rx, \
+ "RX: %s(): " fmt "\n", __func__, ##args)
+
+#define VIRTIO_CRYPTO_RX_LOG_INFO(fmt, args...) \
+ VIRTIO_CRYPTO_RX_LOG_IMPL(INFO, fmt, ## args)
+
+#define VIRTIO_CRYPTO_RX_LOG_DBG(fmt, args...) \
+ VIRTIO_CRYPTO_RX_LOG_IMPL(DEBUG, fmt, ## args)
+
+#define VIRTIO_CRYPTO_RX_LOG_ERR(fmt, args...) \
+ VIRTIO_CRYPTO_RX_LOG_IMPL(ERR, fmt, ## args)
+
+extern int virtio_crypto_logtype_tx;
+
+#define VIRTIO_CRYPTO_TX_LOG_IMPL(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, virtio_crypto_logtype_tx, \
+ "TX: %s(): " fmt "\n", __func__, ##args)
+
+#define VIRTIO_CRYPTO_TX_LOG_INFO(fmt, args...) \
+ VIRTIO_CRYPTO_TX_LOG_IMPL(INFO, fmt, ## args)
+
+#define VIRTIO_CRYPTO_TX_LOG_DBG(fmt, args...) \
+ VIRTIO_CRYPTO_TX_LOG_IMPL(DEBUG, fmt, ## args)
+
+#define VIRTIO_CRYPTO_TX_LOG_ERR(fmt, args...) \
+ VIRTIO_CRYPTO_TX_LOG_IMPL(ERR, fmt, ## args)
+
+extern int virtio_crypto_logtype_driver;
+
+#define VIRTIO_CRYPTO_DRV_LOG_IMPL(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, virtio_crypto_logtype_driver, \
+ "DRIVER: %s(): " fmt "\n", __func__, ##args)
+
+#define VIRTIO_CRYPTO_DRV_LOG_INFO(fmt, args...) \
+ VIRTIO_CRYPTO_DRV_LOG_IMPL(INFO, fmt, ## args)
+
+#define VIRTIO_CRYPTO_DRV_LOG_DBG(fmt, args...) \
+ VIRTIO_CRYPTO_DRV_LOG_IMPL(DEBUG, fmt, ## args)
+
+#define VIRTIO_CRYPTO_DRV_LOG_ERR(fmt, args...) \
+ VIRTIO_CRYPTO_DRV_LOG_IMPL(ERR, fmt, ## args)
+
+#endif /* _VIRTIO_LOGS_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/virtio/virtio_pci.c b/src/spdk/dpdk/drivers/crypto/virtio/virtio_pci.c
new file mode 100644
index 00000000..832c465b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/virtio/virtio_pci.c
@@ -0,0 +1,462 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#include <stdint.h>
+
+#ifdef RTE_EXEC_ENV_LINUXAPP
+ #include <dirent.h>
+ #include <fcntl.h>
+#endif
+
+#include <rte_io.h>
+#include <rte_bus.h>
+
+#include "virtio_pci.h"
+#include "virtqueue.h"
+
+/*
+ * The following macros are derived from linux/pci_regs.h; however,
+ * we can't simply include that header here, as it does not exist
+ * on non-Linux platforms.
+ */
+#define PCI_CAPABILITY_LIST 0x34
+#define PCI_CAP_ID_VNDR 0x09
+#define PCI_CAP_ID_MSIX 0x11
+
+/*
+ * The remaining space is defined by each driver as the per-driver
+ * configuration space.
+ */
+#define VIRTIO_PCI_CONFIG(hw) \
+ (((hw)->use_msix == VIRTIO_MSIX_ENABLED) ? 24 : 20)
+
+struct virtio_hw_internal virtio_hw_internal[RTE_MAX_VIRTIO_CRYPTO];
+
+static inline int
+check_vq_phys_addr_ok(struct virtqueue *vq)
+{
+	/* The virtio PCI VIRTIO_PCI_QUEUE_PFN register is 32 bit,
+	 * and only accepts a 32 bit page frame number.
+	 * Check that the allocated physical memory does not exceed 16TB.
+	 */
+ if ((vq->vq_ring_mem + vq->vq_ring_size - 1) >>
+ (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("vring address shouldn't be above 16TB!");
+ return 0;
+ }
+
+ return 1;
+}
+
+static inline void
+io_write64_twopart(uint64_t val, uint32_t *lo, uint32_t *hi)
+{
+ rte_write32(val & ((1ULL << 32) - 1), lo);
+ rte_write32(val >> 32, hi);
+}
+
+static void
+modern_read_dev_config(struct virtio_crypto_hw *hw, size_t offset,
+ void *dst, int length)
+{
+ int i;
+ uint8_t *p;
+ uint8_t old_gen, new_gen;
+
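+	/* if config_generation changed while reading, the snapshot may be torn: read again */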
+ do {
+ old_gen = rte_read8(&hw->common_cfg->config_generation);
+
+ p = dst;
+ for (i = 0; i < length; i++)
+ *p++ = rte_read8((uint8_t *)hw->dev_cfg + offset + i);
+
+ new_gen = rte_read8(&hw->common_cfg->config_generation);
+ } while (old_gen != new_gen);
+}
+
+static void
+modern_write_dev_config(struct virtio_crypto_hw *hw, size_t offset,
+ const void *src, int length)
+{
+ int i;
+ const uint8_t *p = src;
+
+ for (i = 0; i < length; i++)
+ rte_write8((*p++), (((uint8_t *)hw->dev_cfg) + offset + i));
+}
+
+static uint64_t
+modern_get_features(struct virtio_crypto_hw *hw)
+{
+ uint32_t features_lo, features_hi;
+
+ rte_write32(0, &hw->common_cfg->device_feature_select);
+ features_lo = rte_read32(&hw->common_cfg->device_feature);
+
+ rte_write32(1, &hw->common_cfg->device_feature_select);
+ features_hi = rte_read32(&hw->common_cfg->device_feature);
+
+ return ((uint64_t)features_hi << 32) | features_lo;
+}
+
+static void
+modern_set_features(struct virtio_crypto_hw *hw, uint64_t features)
+{
+ rte_write32(0, &hw->common_cfg->guest_feature_select);
+ rte_write32(features & ((1ULL << 32) - 1),
+ &hw->common_cfg->guest_feature);
+
+ rte_write32(1, &hw->common_cfg->guest_feature_select);
+ rte_write32(features >> 32,
+ &hw->common_cfg->guest_feature);
+}
+
+static uint8_t
+modern_get_status(struct virtio_crypto_hw *hw)
+{
+ return rte_read8(&hw->common_cfg->device_status);
+}
+
+static void
+modern_set_status(struct virtio_crypto_hw *hw, uint8_t status)
+{
+ rte_write8(status, &hw->common_cfg->device_status);
+}
+
+static void
+modern_reset(struct virtio_crypto_hw *hw)
+{
+ modern_set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
+ modern_get_status(hw);
+}
+
+static uint8_t
+modern_get_isr(struct virtio_crypto_hw *hw)
+{
+ return rte_read8(hw->isr);
+}
+
+static uint16_t
+modern_set_config_irq(struct virtio_crypto_hw *hw, uint16_t vec)
+{
+ rte_write16(vec, &hw->common_cfg->msix_config);
+ return rte_read16(&hw->common_cfg->msix_config);
+}
+
+static uint16_t
+modern_set_queue_irq(struct virtio_crypto_hw *hw, struct virtqueue *vq,
+ uint16_t vec)
+{
+ rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
+ rte_write16(vec, &hw->common_cfg->queue_msix_vector);
+ return rte_read16(&hw->common_cfg->queue_msix_vector);
+}
+
+static uint16_t
+modern_get_queue_num(struct virtio_crypto_hw *hw, uint16_t queue_id)
+{
+ rte_write16(queue_id, &hw->common_cfg->queue_select);
+ return rte_read16(&hw->common_cfg->queue_size);
+}
+
+static int
+modern_setup_queue(struct virtio_crypto_hw *hw, struct virtqueue *vq)
+{
+ uint64_t desc_addr, avail_addr, used_addr;
+ uint16_t notify_off;
+
+ if (!check_vq_phys_addr_ok(vq))
+ return -1;
+
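+	/* virtio 1.0 split ring layout: descriptor table, then avail ring, then aligned used ring */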
+ desc_addr = vq->vq_ring_mem;
+ avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
+ used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
+ ring[vq->vq_nentries]),
+ VIRTIO_PCI_VRING_ALIGN);
+
+ rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
+
+ io_write64_twopart(desc_addr, &hw->common_cfg->queue_desc_lo,
+ &hw->common_cfg->queue_desc_hi);
+ io_write64_twopart(avail_addr, &hw->common_cfg->queue_avail_lo,
+ &hw->common_cfg->queue_avail_hi);
+ io_write64_twopart(used_addr, &hw->common_cfg->queue_used_lo,
+ &hw->common_cfg->queue_used_hi);
+
+ notify_off = rte_read16(&hw->common_cfg->queue_notify_off);
+ vq->notify_addr = (void *)((uint8_t *)hw->notify_base +
+ notify_off * hw->notify_off_multiplier);
+
+ rte_write16(1, &hw->common_cfg->queue_enable);
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("queue %u addresses:", vq->vq_queue_index);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("\t desc_addr: %" PRIx64, desc_addr);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("\t aval_addr: %" PRIx64, avail_addr);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("\t used_addr: %" PRIx64, used_addr);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("\t notify addr: %p (notify offset: %u)",
+ vq->notify_addr, notify_off);
+
+ return 0;
+}
+
+static void
+modern_del_queue(struct virtio_crypto_hw *hw, struct virtqueue *vq)
+{
+ rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
+
+ io_write64_twopart(0, &hw->common_cfg->queue_desc_lo,
+ &hw->common_cfg->queue_desc_hi);
+ io_write64_twopart(0, &hw->common_cfg->queue_avail_lo,
+ &hw->common_cfg->queue_avail_hi);
+ io_write64_twopart(0, &hw->common_cfg->queue_used_lo,
+ &hw->common_cfg->queue_used_hi);
+
+ rte_write16(0, &hw->common_cfg->queue_enable);
+}
+
+static void
+modern_notify_queue(struct virtio_crypto_hw *hw __rte_unused,
+ struct virtqueue *vq)
+{
+ rte_write16(vq->vq_queue_index, vq->notify_addr);
+}
+
+const struct virtio_pci_ops virtio_crypto_modern_ops = {
+ .read_dev_cfg = modern_read_dev_config,
+ .write_dev_cfg = modern_write_dev_config,
+ .reset = modern_reset,
+ .get_status = modern_get_status,
+ .set_status = modern_set_status,
+ .get_features = modern_get_features,
+ .set_features = modern_set_features,
+ .get_isr = modern_get_isr,
+ .set_config_irq = modern_set_config_irq,
+ .set_queue_irq = modern_set_queue_irq,
+ .get_queue_num = modern_get_queue_num,
+ .setup_queue = modern_setup_queue,
+ .del_queue = modern_del_queue,
+ .notify_queue = modern_notify_queue,
+};
+
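+/*
+ * The helpers below dispatch through VTPCI_OPS(), i.e. the per-device
+ * vtpci_ops pointer that vtpci_cryptodev_init() points at the modern ops
+ * table above; virtio crypto never installs a legacy ops table.
+ */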
+void
+vtpci_read_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset,
+ void *dst, int length)
+{
+ VTPCI_OPS(hw)->read_dev_cfg(hw, offset, dst, length);
+}
+
+void
+vtpci_write_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset,
+ const void *src, int length)
+{
+ VTPCI_OPS(hw)->write_dev_cfg(hw, offset, src, length);
+}
+
+uint64_t
+vtpci_cryptodev_negotiate_features(struct virtio_crypto_hw *hw,
+ uint64_t host_features)
+{
+ uint64_t features;
+
+ /*
+ * Limit negotiated features to what the driver, virtqueue, and
+ * host all support.
+ */
+ features = host_features & hw->guest_features;
+ VTPCI_OPS(hw)->set_features(hw, features);
+
+ return features;
+}
+
+void
+vtpci_cryptodev_reset(struct virtio_crypto_hw *hw)
+{
+ VTPCI_OPS(hw)->set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
+ /* flush status write */
+ VTPCI_OPS(hw)->get_status(hw);
+}
+
+void
+vtpci_cryptodev_reinit_complete(struct virtio_crypto_hw *hw)
+{
+ vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER_OK);
+}
+
+void
+vtpci_cryptodev_set_status(struct virtio_crypto_hw *hw, uint8_t status)
+{
+ if (status != VIRTIO_CONFIG_STATUS_RESET)
+ status |= VTPCI_OPS(hw)->get_status(hw);
+
+ VTPCI_OPS(hw)->set_status(hw, status);
+}
+
+uint8_t
+vtpci_cryptodev_get_status(struct virtio_crypto_hw *hw)
+{
+ return VTPCI_OPS(hw)->get_status(hw);
+}
+
+uint8_t
+vtpci_cryptodev_isr(struct virtio_crypto_hw *hw)
+{
+ return VTPCI_OPS(hw)->get_isr(hw);
+}
+
+static void *
+get_cfg_addr(struct rte_pci_device *dev, struct virtio_pci_cap *cap)
+{
+ uint8_t bar = cap->bar;
+ uint32_t length = cap->length;
+ uint32_t offset = cap->offset;
+ uint8_t *base;
+
+ if (bar >= PCI_MAX_RESOURCE) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("invalid bar: %u", bar);
+ return NULL;
+ }
+
+ if (offset + length < offset) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("offset(%u) + length(%u) overflows",
+ offset, length);
+ return NULL;
+ }
+
+ if (offset + length > dev->mem_resource[bar].len) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR(
+ "invalid cap: overflows bar space: %u > %" PRIu64,
+ offset + length, dev->mem_resource[bar].len);
+ return NULL;
+ }
+
+ base = dev->mem_resource[bar].addr;
+ if (base == NULL) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("bar %u base addr is NULL", bar);
+ return NULL;
+ }
+
+ return base + offset;
+}
+
+#define PCI_MSIX_ENABLE 0x8000
+
+static int
+virtio_read_caps(struct rte_pci_device *dev, struct virtio_crypto_hw *hw)
+{
+ uint8_t pos;
+ struct virtio_pci_cap cap;
+ int ret;
+
+ if (rte_pci_map_device(dev)) {
+ VIRTIO_CRYPTO_INIT_LOG_DBG("failed to map pci device!");
+ return -1;
+ }
+
+ ret = rte_pci_read_config(dev, &pos, 1, PCI_CAPABILITY_LIST);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_INIT_LOG_DBG("failed to read pci capability list");
+ return -1;
+ }
+
+ while (pos) {
+ ret = rte_pci_read_config(dev, &cap, sizeof(cap), pos);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR(
+ "failed to read pci cap at pos: %x", pos);
+ break;
+ }
+
+ if (cap.cap_vndr == PCI_CAP_ID_MSIX) {
+			/* Transitional devices also expose this capability, which
+			 * is why we additionally check whether MSI-X is enabled.
+			 * 1st byte is the cap ID; 2nd byte is the position of the
+			 * next cap; the next two bytes are the flags.
+ */
+ uint16_t flags = ((uint16_t *)&cap)[1];
+
+ if (flags & PCI_MSIX_ENABLE)
+ hw->use_msix = VIRTIO_MSIX_ENABLED;
+ else
+ hw->use_msix = VIRTIO_MSIX_DISABLED;
+ }
+
+ if (cap.cap_vndr != PCI_CAP_ID_VNDR) {
+ VIRTIO_CRYPTO_INIT_LOG_DBG(
+ "[%2x] skipping non VNDR cap id: %02x",
+ pos, cap.cap_vndr);
+ goto next;
+ }
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG(
+ "[%2x] cfg type: %u, bar: %u, offset: %04x, len: %u",
+ pos, cap.cfg_type, cap.bar, cap.offset, cap.length);
+
+ switch (cap.cfg_type) {
+ case VIRTIO_PCI_CAP_COMMON_CFG:
+ hw->common_cfg = get_cfg_addr(dev, &cap);
+ break;
+ case VIRTIO_PCI_CAP_NOTIFY_CFG:
+ rte_pci_read_config(dev, &hw->notify_off_multiplier,
+ 4, pos + sizeof(cap));
+ hw->notify_base = get_cfg_addr(dev, &cap);
+ break;
+ case VIRTIO_PCI_CAP_DEVICE_CFG:
+ hw->dev_cfg = get_cfg_addr(dev, &cap);
+ break;
+ case VIRTIO_PCI_CAP_ISR_CFG:
+ hw->isr = get_cfg_addr(dev, &cap);
+ break;
+ }
+
+next:
+ pos = cap.cap_next;
+ }
+
+ if (hw->common_cfg == NULL || hw->notify_base == NULL ||
+ hw->dev_cfg == NULL || hw->isr == NULL) {
+ VIRTIO_CRYPTO_INIT_LOG_INFO("no modern virtio pci device found.");
+ return -1;
+ }
+
+ VIRTIO_CRYPTO_INIT_LOG_INFO("found modern virtio pci device.");
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("common cfg mapped at: %p", hw->common_cfg);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("device cfg mapped at: %p", hw->dev_cfg);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("isr cfg mapped at: %p", hw->isr);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("notify base: %p, notify off multiplier: %u",
+ hw->notify_base, hw->notify_off_multiplier);
+
+ return 0;
+}
+
+/*
+ * Return -1:
+ * if there is an error mapping the device with VFIO/UIO,
+ * if the port mapping fails when the driver type is KDRV_NONE,
+ * if the device is whitelisted but the driver type is KDRV_UNKNOWN.
+ * Return 1 if a kernel driver is managing the device.
+ * Return 0 on success.
+ */
+int
+vtpci_cryptodev_init(struct rte_pci_device *dev, struct virtio_crypto_hw *hw)
+{
+ /*
+	 * Check whether we can read the virtio PCI capabilities, which
+	 * exist only on modern PCI devices. If that fails, fall back to
+	 * legacy virtio handling.
+ */
+ if (virtio_read_caps(dev, hw) == 0) {
+ VIRTIO_CRYPTO_INIT_LOG_INFO("modern virtio pci detected.");
+ virtio_hw_internal[hw->dev_id].vtpci_ops =
+ &virtio_crypto_modern_ops;
+ hw->modern = 1;
+ return 0;
+ }
+
+ /*
+ * virtio crypto conforms to virtio 1.0 and doesn't support
+ * legacy mode
+ */
+ return -1;
+}
diff --git a/src/spdk/dpdk/drivers/crypto/virtio/virtio_pci.h b/src/spdk/dpdk/drivers/crypto/virtio/virtio_pci.h
new file mode 100644
index 00000000..604ec366
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/virtio/virtio_pci.h
@@ -0,0 +1,253 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#ifndef _VIRTIO_PCI_H_
+#define _VIRTIO_PCI_H_
+
+#include <stdint.h>
+
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_cryptodev.h>
+
+#include "virtio_crypto.h"
+
+struct virtqueue;
+
+/* VirtIO PCI vendor/device ID. */
+#define VIRTIO_CRYPTO_PCI_VENDORID 0x1AF4
+#define VIRTIO_CRYPTO_PCI_DEVICEID 0x1054
+
+/* VirtIO ABI version, this must match exactly. */
+#define VIRTIO_PCI_ABI_VERSION 0
+
+/*
+ * VirtIO Header, located in BAR 0.
+ */
+#define VIRTIO_PCI_HOST_FEATURES 0 /* host's supported features (32bit, RO)*/
+#define VIRTIO_PCI_GUEST_FEATURES 4 /* guest's supported features (32, RW) */
+#define VIRTIO_PCI_QUEUE_PFN 8 /* physical address of VQ (32, RW) */
+#define VIRTIO_PCI_QUEUE_NUM 12 /* number of ring entries (16, RO) */
+#define VIRTIO_PCI_QUEUE_SEL 14 /* current VQ selection (16, RW) */
+#define VIRTIO_PCI_QUEUE_NOTIFY 16 /* notify host regarding VQ (16, RW) */
+#define VIRTIO_PCI_STATUS 18 /* device status register (8, RW) */
+#define VIRTIO_PCI_ISR 19 /* interrupt status register, reading
+ * also clears the register (8, RO)
+ */
+/* Only if MSIX is enabled: */
+
+/* configuration change vector (16, RW) */
+#define VIRTIO_MSI_CONFIG_VECTOR 20
+/* vector for selected VQ notifications */
+#define VIRTIO_MSI_QUEUE_VECTOR 22
+
+/* The bit of the ISR which indicates a device has an interrupt. */
+#define VIRTIO_PCI_ISR_INTR 0x1
+/* The bit of the ISR which indicates a device configuration change. */
+#define VIRTIO_PCI_ISR_CONFIG 0x2
+/* Vector value used to disable MSI for queue. */
+#define VIRTIO_MSI_NO_VECTOR 0xFFFF
+
+/* Status byte for guest to report progress. */
+#define VIRTIO_CONFIG_STATUS_RESET 0x00
+#define VIRTIO_CONFIG_STATUS_ACK 0x01
+#define VIRTIO_CONFIG_STATUS_DRIVER 0x02
+#define VIRTIO_CONFIG_STATUS_DRIVER_OK 0x04
+#define VIRTIO_CONFIG_STATUS_FEATURES_OK 0x08
+#define VIRTIO_CONFIG_STATUS_FAILED 0x80
+
+/*
+ * Each virtqueue indirect descriptor list must be physically contiguous.
+ * To allow us to malloc(9) each list individually, limit the number
+ * supported to what will fit in one page. With 4KB pages, this is a limit
+ * of 256 descriptors. If there is ever a need for more, we can switch to
+ * contigmalloc(9) for the larger allocations, similar to what
+ * bus_dmamem_alloc(9) does.
+ *
+ * Note the sizeof(struct vring_desc) is 16 bytes.
+ */
+#define VIRTIO_MAX_INDIRECT ((int) (PAGE_SIZE / 16))
+
+/* Do we get callbacks when the ring is completely used, even if we've
+ * suppressed them?
+ */
+#define VIRTIO_F_NOTIFY_ON_EMPTY 24
+
+/* Can the device handle any descriptor layout? */
+#define VIRTIO_F_ANY_LAYOUT 27
+
+/* We support indirect buffer descriptors */
+#define VIRTIO_RING_F_INDIRECT_DESC 28
+
+#define VIRTIO_F_VERSION_1 32
+#define VIRTIO_F_IOMMU_PLATFORM 33
+
+/* The Guest publishes the used index for which it expects an interrupt
+ * at the end of the avail ring. Host should ignore the avail->flags field.
+ */
+/* The Host publishes the avail index for which it expects a kick
+ * at the end of the used ring. Guest should ignore the used->flags field.
+ */
+#define VIRTIO_RING_F_EVENT_IDX 29
+
+/* Common configuration */
+#define VIRTIO_PCI_CAP_COMMON_CFG 1
+/* Notifications */
+#define VIRTIO_PCI_CAP_NOTIFY_CFG 2
+/* ISR Status */
+#define VIRTIO_PCI_CAP_ISR_CFG 3
+/* Device specific configuration */
+#define VIRTIO_PCI_CAP_DEVICE_CFG 4
+/* PCI configuration access */
+#define VIRTIO_PCI_CAP_PCI_CFG 5
+
+/* This is the PCI capability header: */
+struct virtio_pci_cap {
+ uint8_t cap_vndr; /* Generic PCI field: PCI_CAP_ID_VNDR */
+ uint8_t cap_next; /* Generic PCI field: next ptr. */
+ uint8_t cap_len; /* Generic PCI field: capability length */
+ uint8_t cfg_type; /* Identifies the structure. */
+ uint8_t bar; /* Where to find it. */
+ uint8_t padding[3]; /* Pad to full dword. */
+ uint32_t offset; /* Offset within bar. */
+ uint32_t length; /* Length of the structure, in bytes. */
+};
+
+struct virtio_pci_notify_cap {
+ struct virtio_pci_cap cap;
+ uint32_t notify_off_multiplier; /* Multiplier for queue_notify_off. */
+};
+
+/* Fields in VIRTIO_PCI_CAP_COMMON_CFG: */
+struct virtio_pci_common_cfg {
+ /* About the whole device. */
+ uint32_t device_feature_select; /* read-write */
+ uint32_t device_feature; /* read-only */
+ uint32_t guest_feature_select; /* read-write */
+ uint32_t guest_feature; /* read-write */
+ uint16_t msix_config; /* read-write */
+ uint16_t num_queues; /* read-only */
+ uint8_t device_status; /* read-write */
+ uint8_t config_generation; /* read-only */
+
+ /* About a specific virtqueue. */
+ uint16_t queue_select; /* read-write */
+ uint16_t queue_size; /* read-write, power of 2. */
+ uint16_t queue_msix_vector; /* read-write */
+ uint16_t queue_enable; /* read-write */
+ uint16_t queue_notify_off; /* read-only */
+ uint32_t queue_desc_lo; /* read-write */
+ uint32_t queue_desc_hi; /* read-write */
+ uint32_t queue_avail_lo; /* read-write */
+ uint32_t queue_avail_hi; /* read-write */
+ uint32_t queue_used_lo; /* read-write */
+ uint32_t queue_used_hi; /* read-write */
+};
+
+struct virtio_crypto_hw;
+
+struct virtio_pci_ops {
+ void (*read_dev_cfg)(struct virtio_crypto_hw *hw, size_t offset,
+ void *dst, int len);
+ void (*write_dev_cfg)(struct virtio_crypto_hw *hw, size_t offset,
+ const void *src, int len);
+ void (*reset)(struct virtio_crypto_hw *hw);
+
+ uint8_t (*get_status)(struct virtio_crypto_hw *hw);
+ void (*set_status)(struct virtio_crypto_hw *hw, uint8_t status);
+
+ uint64_t (*get_features)(struct virtio_crypto_hw *hw);
+ void (*set_features)(struct virtio_crypto_hw *hw, uint64_t features);
+
+ uint8_t (*get_isr)(struct virtio_crypto_hw *hw);
+
+ uint16_t (*set_config_irq)(struct virtio_crypto_hw *hw, uint16_t vec);
+
+ uint16_t (*set_queue_irq)(struct virtio_crypto_hw *hw,
+ struct virtqueue *vq, uint16_t vec);
+
+ uint16_t (*get_queue_num)(struct virtio_crypto_hw *hw,
+ uint16_t queue_id);
+ int (*setup_queue)(struct virtio_crypto_hw *hw, struct virtqueue *vq);
+ void (*del_queue)(struct virtio_crypto_hw *hw, struct virtqueue *vq);
+ void (*notify_queue)(struct virtio_crypto_hw *hw, struct virtqueue *vq);
+};
+
+struct virtio_crypto_hw {
+ /* control queue */
+ struct virtqueue *cvq;
+ uint16_t dev_id;
+ uint16_t max_dataqueues;
+ uint64_t req_guest_features;
+ uint64_t guest_features;
+ uint8_t use_msix;
+ uint8_t modern;
+ uint32_t notify_off_multiplier;
+ uint8_t *isr;
+ uint16_t *notify_base;
+ struct virtio_pci_common_cfg *common_cfg;
+ struct virtio_crypto_config *dev_cfg;
+ const struct rte_cryptodev_capabilities *virtio_dev_capabilities;
+};
+
+/*
+ * While virtio_crypto_hw is stored in shared memory, this structure holds
+ * process-local information that may differ between processes in the
+ * multi-process model, for example the vtpci_ops pointer.
+ */
+struct virtio_hw_internal {
+ const struct virtio_pci_ops *vtpci_ops;
+ struct rte_pci_ioport io;
+};
+
+#define VTPCI_OPS(hw) (virtio_hw_internal[(hw)->dev_id].vtpci_ops)
+#define VTPCI_IO(hw) (&virtio_hw_internal[(hw)->dev_id].io)
+
+extern struct virtio_hw_internal virtio_hw_internal[RTE_MAX_VIRTIO_CRYPTO];
+
+/*
+ * How many bits to shift physical queue address written to QUEUE_PFN.
+ * 12 is historical, and due to x86 page size.
+ */
+#define VIRTIO_PCI_QUEUE_ADDR_SHIFT 12
+
+/* The alignment to use between consumer and producer parts of vring. */
+#define VIRTIO_PCI_VRING_ALIGN 4096
+
+enum virtio_msix_status {
+ VIRTIO_MSIX_NONE = 0,
+ VIRTIO_MSIX_DISABLED = 1,
+ VIRTIO_MSIX_ENABLED = 2
+};
+
+static inline int
+vtpci_with_feature(struct virtio_crypto_hw *hw, uint64_t bit)
+{
+ return (hw->guest_features & (1ULL << bit)) != 0;
+}
+
+/*
+ * Function declaration from virtio_pci.c
+ */
+int vtpci_cryptodev_init(struct rte_pci_device *dev,
+ struct virtio_crypto_hw *hw);
+void vtpci_cryptodev_reset(struct virtio_crypto_hw *hw);
+
+void vtpci_cryptodev_reinit_complete(struct virtio_crypto_hw *hw);
+
+uint8_t vtpci_cryptodev_get_status(struct virtio_crypto_hw *hw);
+void vtpci_cryptodev_set_status(struct virtio_crypto_hw *hw, uint8_t status);
+
+uint64_t vtpci_cryptodev_negotiate_features(struct virtio_crypto_hw *hw,
+ uint64_t host_features);
+
+void vtpci_write_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset,
+ const void *src, int length);
+
+void vtpci_read_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset,
+ void *dst, int length);
+
+uint8_t vtpci_cryptodev_isr(struct virtio_crypto_hw *hw);
+
+#endif /* _VIRTIO_PCI_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/virtio/virtio_ring.h b/src/spdk/dpdk/drivers/crypto/virtio/virtio_ring.h
new file mode 100644
index 00000000..ee306745
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/virtio/virtio_ring.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#ifndef _VIRTIO_RING_H_
+#define _VIRTIO_RING_H_
+
+#include <stdint.h>
+
+#include <rte_common.h>
+
+/* This marks a buffer as continuing via the next field. */
+#define VRING_DESC_F_NEXT 1
+/* This marks a buffer as write-only (otherwise read-only). */
+#define VRING_DESC_F_WRITE 2
+/* This means the buffer contains a list of buffer descriptors. */
+#define VRING_DESC_F_INDIRECT 4
+
+/* The Host uses this in used->flags to advise the Guest: don't kick me
+ * when you add a buffer. It's unreliable, so it's simply an
+ * optimization. Guest will still kick if it's out of buffers.
+ */
+#define VRING_USED_F_NO_NOTIFY 1
+/* The Guest uses this in avail->flags to advise the Host: don't
+ * interrupt me when you consume a buffer. It's unreliable, so it's
+ * simply an optimization.
+ */
+#define VRING_AVAIL_F_NO_INTERRUPT 1
+
+/* VirtIO ring descriptors: 16 bytes.
+ * These can chain together via "next".
+ */
+struct vring_desc {
+ uint64_t addr; /* Address (guest-physical). */
+ uint32_t len; /* Length. */
+ uint16_t flags; /* The flags as indicated above. */
+ uint16_t next; /* We chain unused descriptors via this. */
+};
+
+struct vring_avail {
+ uint16_t flags;
+ uint16_t idx;
+ uint16_t ring[0];
+};
+
+/* id is a 16bit index. uint32_t is used here for ids for padding reasons. */
+struct vring_used_elem {
+ /* Index of start of used descriptor chain. */
+ uint32_t id;
+ /* Total length of the descriptor chain which was written to. */
+ uint32_t len;
+};
+
+struct vring_used {
+ uint16_t flags;
+ volatile uint16_t idx;
+ struct vring_used_elem ring[0];
+};
+
+struct vring {
+ unsigned int num;
+ struct vring_desc *desc;
+ struct vring_avail *avail;
+ struct vring_used *used;
+};
+
+/* The standard layout for the ring is a continuous chunk of memory which
+ * looks like this. We assume num is a power of 2.
+ *
+ * struct vring {
+ * // The actual descriptors (16 bytes each)
+ * struct vring_desc desc[num];
+ *
+ * // A ring of available descriptor heads with free-running index.
+ * __u16 avail_flags;
+ * __u16 avail_idx;
+ * __u16 available[num];
+ * __u16 used_event_idx;
+ *
+ * // Padding to the next align boundary.
+ * char pad[];
+ *
+ * // A ring of used descriptor heads with free-running index.
+ * __u16 used_flags;
+ * __u16 used_idx;
+ * struct vring_used_elem used[num];
+ * __u16 avail_event_idx;
+ * };
+ *
+ * NOTE: for VirtIO PCI, align is 4096.
+ */
+
+/*
+ * We publish the used event index at the end of the available ring, and vice
+ * versa. They are at the end for backwards compatibility.
+ */
+#define vring_used_event(vr) ((vr)->avail->ring[(vr)->num])
+#define vring_avail_event(vr) (*(uint16_t *)&(vr)->used->ring[(vr)->num])
+
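+/*
+ * For example, vring_size(256, 4096) is 256 * 16 = 4096 bytes of
+ * descriptors, plus 4 + 256 * 2 = 516 bytes of avail ring rounded up to
+ * the next 4096-byte boundary (8192 so far), plus 4 + 256 * 8 = 2052 bytes
+ * of used ring, i.e. 10244 bytes in total.
+ */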
+static inline size_t
+vring_size(unsigned int num, unsigned long align)
+{
+ size_t size;
+
+ size = num * sizeof(struct vring_desc);
+ size += sizeof(struct vring_avail) + (num * sizeof(uint16_t));
+ size = RTE_ALIGN_CEIL(size, align);
+ size += sizeof(struct vring_used) +
+ (num * sizeof(struct vring_used_elem));
+ return size;
+}
+
+static inline void
+vring_init(struct vring *vr, unsigned int num, uint8_t *p,
+ unsigned long align)
+{
+ vr->num = num;
+ vr->desc = (struct vring_desc *) p;
+ vr->avail = (struct vring_avail *) (p +
+ num * sizeof(struct vring_desc));
+ vr->used = (void *)
+ RTE_ALIGN_CEIL((uintptr_t)(&vr->avail->ring[num]), align);
+}
+
+/*
+ * The following is used with VIRTIO_RING_F_EVENT_IDX.
+ * Assuming a given event_idx value from the other side, if we have
+ * just incremented index from old to new_idx, should we trigger an
+ * event?
+ */
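+/*
+ * For example, with old = 10, new_idx = 15 and event_idx = 12:
+ * (uint16_t)(15 - 12 - 1) = 2 is less than (uint16_t)(15 - 10) = 5, so an
+ * event should be triggered; the unsigned 16-bit arithmetic keeps the
+ * comparison valid across index wrap-around.
+ */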
+static inline int
+vring_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
+{
+ return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
+}
+
+#endif /* _VIRTIO_RING_H_ */
diff --git a/src/spdk/dpdk/drivers/crypto/virtio/virtio_rxtx.c b/src/spdk/dpdk/drivers/crypto/virtio/virtio_rxtx.c
new file mode 100644
index 00000000..e32a1ecd
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/virtio/virtio_rxtx.c
@@ -0,0 +1,527 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+#include <rte_cryptodev_pmd.h>
+
+#include "virtqueue.h"
+#include "virtio_cryptodev.h"
+#include "virtio_crypto_algs.h"
+
+static void
+vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
+{
+ struct vring_desc *dp, *dp_tail;
+ struct vq_desc_extra *dxp;
+ uint16_t desc_idx_last = desc_idx;
+
+ dp = &vq->vq_ring.desc[desc_idx];
+ dxp = &vq->vq_descx[desc_idx];
+ vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
+ if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
+ while (dp->flags & VRING_DESC_F_NEXT) {
+ desc_idx_last = dp->next;
+ dp = &vq->vq_ring.desc[dp->next];
+ }
+ }
+ dxp->ndescs = 0;
+
+ /*
+	 * We must append the existing free chain, if any, to the end of
+	 * the newly freed chain. If the virtqueue was completely used,
+	 * the head would be VQ_RING_DESC_CHAIN_END.
+ */
+ if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
+ vq->vq_desc_head_idx = desc_idx;
+ } else {
+ dp_tail = &vq->vq_ring.desc[vq->vq_desc_tail_idx];
+ dp_tail->next = desc_idx;
+ }
+
+ vq->vq_desc_tail_idx = desc_idx_last;
+ dp->next = VQ_RING_DESC_CHAIN_END;
+}
+
+static uint16_t
+virtqueue_dequeue_burst_rx(struct virtqueue *vq,
+ struct rte_crypto_op **rx_pkts, uint16_t num)
+{
+ struct vring_used_elem *uep;
+ struct rte_crypto_op *cop;
+ uint16_t used_idx, desc_idx;
+ uint16_t i;
+ struct virtio_crypto_inhdr *inhdr;
+ struct virtio_crypto_op_cookie *op_cookie;
+
+ /* Caller does the check */
+ for (i = 0; i < num ; i++) {
+ used_idx = (uint16_t)(vq->vq_used_cons_idx
+ & (vq->vq_nentries - 1));
+ uep = &vq->vq_ring.used->ring[used_idx];
+ desc_idx = (uint16_t)uep->id;
+ cop = (struct rte_crypto_op *)
+ vq->vq_descx[desc_idx].crypto_op;
+ if (unlikely(cop == NULL)) {
+ VIRTIO_CRYPTO_RX_LOG_DBG("vring descriptor with no "
+ "mbuf cookie at %u",
+ vq->vq_used_cons_idx);
+ break;
+ }
+
+ op_cookie = (struct virtio_crypto_op_cookie *)
+ vq->vq_descx[desc_idx].cookie;
+ inhdr = &(op_cookie->inhdr);
+ switch (inhdr->status) {
+ case VIRTIO_CRYPTO_OK:
+ cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ break;
+ case VIRTIO_CRYPTO_ERR:
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ vq->packets_received_failed++;
+ break;
+ case VIRTIO_CRYPTO_BADMSG:
+ cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ vq->packets_received_failed++;
+ break;
+ case VIRTIO_CRYPTO_NOTSUPP:
+ cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ vq->packets_received_failed++;
+ break;
+ case VIRTIO_CRYPTO_INVSESS:
+ cop->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ vq->packets_received_failed++;
+ break;
+ default:
+ break;
+ }
+
+ vq->packets_received_total++;
+
+ rx_pkts[i] = cop;
+ rte_mempool_put(vq->mpool, op_cookie);
+
+ vq->vq_used_cons_idx++;
+ vq_ring_free_chain(vq, desc_idx);
+ vq->vq_descx[desc_idx].crypto_op = NULL;
+ }
+
+ return i;
+}
+
+static int
+virtqueue_crypto_sym_pkt_header_arrange(
+ struct rte_crypto_op *cop,
+ struct virtio_crypto_op_data_req *data,
+ struct virtio_crypto_session *session)
+{
+ struct rte_crypto_sym_op *sym_op = cop->sym;
+ struct virtio_crypto_op_data_req *req_data = data;
+ struct virtio_crypto_op_ctrl_req *ctrl = &session->ctrl;
+ struct virtio_crypto_sym_create_session_req *sym_sess_req =
+ &ctrl->u.sym_create_session;
+ struct virtio_crypto_alg_chain_session_para *chain_para =
+ &sym_sess_req->u.chain.para;
+ struct virtio_crypto_cipher_session_para *cipher_para;
+
+ req_data->header.session_id = session->session_id;
+
+ switch (sym_sess_req->op_type) {
+ case VIRTIO_CRYPTO_SYM_OP_CIPHER:
+ req_data->u.sym_req.op_type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
+
+ cipher_para = &sym_sess_req->u.cipher.para;
+ if (cipher_para->op == VIRTIO_CRYPTO_OP_ENCRYPT)
+ req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_ENCRYPT;
+ else
+ req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_DECRYPT;
+
+ req_data->u.sym_req.u.cipher.para.iv_len
+ = session->iv.length;
+
+ req_data->u.sym_req.u.cipher.para.src_data_len =
+ (sym_op->cipher.data.length +
+ sym_op->cipher.data.offset);
+ req_data->u.sym_req.u.cipher.para.dst_data_len =
+ req_data->u.sym_req.u.cipher.para.src_data_len;
+ break;
+ case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
+ req_data->u.sym_req.op_type =
+ VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING;
+
+ cipher_para = &chain_para->cipher_param;
+ if (cipher_para->op == VIRTIO_CRYPTO_OP_ENCRYPT)
+ req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_ENCRYPT;
+ else
+ req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_DECRYPT;
+
+ req_data->u.sym_req.u.chain.para.iv_len = session->iv.length;
+ req_data->u.sym_req.u.chain.para.aad_len = session->aad.length;
+
+ req_data->u.sym_req.u.chain.para.src_data_len =
+ (sym_op->cipher.data.length +
+ sym_op->cipher.data.offset);
+ req_data->u.sym_req.u.chain.para.dst_data_len =
+ req_data->u.sym_req.u.chain.para.src_data_len;
+ req_data->u.sym_req.u.chain.para.cipher_start_src_offset =
+ sym_op->cipher.data.offset;
+ req_data->u.sym_req.u.chain.para.len_to_cipher =
+ sym_op->cipher.data.length;
+ req_data->u.sym_req.u.chain.para.hash_start_src_offset =
+ sym_op->auth.data.offset;
+ req_data->u.sym_req.u.chain.para.len_to_hash =
+ sym_op->auth.data.length;
+ req_data->u.sym_req.u.chain.para.aad_len =
+ chain_para->aad_len;
+
+ if (chain_para->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN)
+ req_data->u.sym_req.u.chain.para.hash_result_len =
+ chain_para->u.hash_param.hash_result_len;
+ if (chain_para->hash_mode ==
+ VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH)
+ req_data->u.sym_req.u.chain.para.hash_result_len =
+ chain_para->u.mac_param.hash_result_len;
+ break;
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
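+/*
+ * The per-op cookie obtained from the mempool is laid out as:
+ * virtio_crypto_op_data_req, virtio_crypto_inhdr (status written back by
+ * the device), the indirect descriptor table, and a copy of the IV when
+ * the op carries no physical address for it. The indirect chain built
+ * below covers, in order: the request header, optional IV, optional AAD,
+ * source data, destination data, optional digest and the status inhdr.
+ */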
+static int
+virtqueue_crypto_sym_enqueue_xmit(
+ struct virtqueue *txvq,
+ struct rte_crypto_op *cop)
+{
+ uint16_t idx = 0;
+ uint16_t num_entry;
+ uint16_t needed = 1;
+ uint16_t head_idx;
+ struct vq_desc_extra *dxp;
+ struct vring_desc *start_dp;
+ struct vring_desc *desc;
+ uint64_t indirect_op_data_req_phys_addr;
+ uint16_t req_data_len = sizeof(struct virtio_crypto_op_data_req);
+ uint32_t indirect_vring_addr_offset = req_data_len +
+ sizeof(struct virtio_crypto_inhdr);
+ uint32_t indirect_iv_addr_offset = indirect_vring_addr_offset +
+ sizeof(struct vring_desc) * NUM_ENTRY_VIRTIO_CRYPTO_OP;
+ struct rte_crypto_sym_op *sym_op = cop->sym;
+ struct virtio_crypto_session *session =
+ (struct virtio_crypto_session *)get_sym_session_private_data(
+ cop->sym->session, cryptodev_virtio_driver_id);
+ struct virtio_crypto_op_data_req *op_data_req;
+ uint32_t hash_result_len = 0;
+ struct virtio_crypto_op_cookie *crypto_op_cookie;
+ struct virtio_crypto_alg_chain_session_para *para;
+
+ if (unlikely(sym_op->m_src->nb_segs != 1))
+ return -EMSGSIZE;
+ if (unlikely(txvq->vq_free_cnt == 0))
+ return -ENOSPC;
+ if (unlikely(txvq->vq_free_cnt < needed))
+ return -EMSGSIZE;
+ head_idx = txvq->vq_desc_head_idx;
+ if (unlikely(head_idx >= txvq->vq_nentries))
+ return -EFAULT;
+ if (unlikely(session == NULL))
+ return -EFAULT;
+
+ dxp = &txvq->vq_descx[head_idx];
+
+ if (rte_mempool_get(txvq->mpool, &dxp->cookie)) {
+ VIRTIO_CRYPTO_TX_LOG_ERR("can not get cookie");
+ return -EFAULT;
+ }
+ crypto_op_cookie = dxp->cookie;
+ indirect_op_data_req_phys_addr =
+ rte_mempool_virt2iova(crypto_op_cookie);
+ op_data_req = (struct virtio_crypto_op_data_req *)crypto_op_cookie;
+
+ if (virtqueue_crypto_sym_pkt_header_arrange(cop, op_data_req, session))
+ return -EFAULT;
+
+ /* status is initialized to VIRTIO_CRYPTO_ERR */
+ ((struct virtio_crypto_inhdr *)
+ ((uint8_t *)op_data_req + req_data_len))->status =
+ VIRTIO_CRYPTO_ERR;
+
+ /* point to indirect vring entry */
+ desc = (struct vring_desc *)
+ ((uint8_t *)op_data_req + indirect_vring_addr_offset);
+ for (idx = 0; idx < (NUM_ENTRY_VIRTIO_CRYPTO_OP - 1); idx++)
+ desc[idx].next = idx + 1;
+ desc[NUM_ENTRY_VIRTIO_CRYPTO_OP - 1].next = VQ_RING_DESC_CHAIN_END;
+
+ idx = 0;
+
+ /* indirect vring: first part, virtio_crypto_op_data_req */
+ desc[idx].addr = indirect_op_data_req_phys_addr;
+ desc[idx].len = req_data_len;
+ desc[idx++].flags = VRING_DESC_F_NEXT;
+
+ /* indirect vring: iv of cipher */
+ if (session->iv.length) {
+ if (cop->phys_addr)
+ desc[idx].addr = cop->phys_addr + session->iv.offset;
+ else {
+ rte_memcpy(crypto_op_cookie->iv,
+ rte_crypto_op_ctod_offset(cop,
+ uint8_t *, session->iv.offset),
+ session->iv.length);
+ desc[idx].addr = indirect_op_data_req_phys_addr +
+ indirect_iv_addr_offset;
+ }
+
+ desc[idx].len = session->iv.length;
+ desc[idx++].flags = VRING_DESC_F_NEXT;
+ }
+
+ /* indirect vring: additional auth data */
+ if (session->aad.length) {
+ desc[idx].addr = session->aad.phys_addr;
+ desc[idx].len = session->aad.length;
+ desc[idx++].flags = VRING_DESC_F_NEXT;
+ }
+
+ /* indirect vring: src data */
+ desc[idx].addr = rte_pktmbuf_mtophys_offset(sym_op->m_src, 0);
+ desc[idx].len = (sym_op->cipher.data.offset
+ + sym_op->cipher.data.length);
+ desc[idx++].flags = VRING_DESC_F_NEXT;
+
+ /* indirect vring: dst data */
+ if (sym_op->m_dst) {
+ desc[idx].addr = rte_pktmbuf_mtophys_offset(sym_op->m_dst, 0);
+ desc[idx].len = (sym_op->cipher.data.offset
+ + sym_op->cipher.data.length);
+ } else {
+ desc[idx].addr = rte_pktmbuf_mtophys_offset(sym_op->m_src, 0);
+ desc[idx].len = (sym_op->cipher.data.offset
+ + sym_op->cipher.data.length);
+ }
+ desc[idx++].flags = VRING_DESC_F_WRITE | VRING_DESC_F_NEXT;
+
+ /* indirect vring: digest result */
+ para = &(session->ctrl.u.sym_create_session.u.chain.para);
+ if (para->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN)
+ hash_result_len = para->u.hash_param.hash_result_len;
+ if (para->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH)
+ hash_result_len = para->u.mac_param.hash_result_len;
+ if (hash_result_len > 0) {
+ desc[idx].addr = sym_op->auth.digest.phys_addr;
+ desc[idx].len = hash_result_len;
+ desc[idx++].flags = VRING_DESC_F_WRITE | VRING_DESC_F_NEXT;
+ }
+
+ /* indirect vring: last part, status returned */
+ desc[idx].addr = indirect_op_data_req_phys_addr + req_data_len;
+ desc[idx].len = sizeof(struct virtio_crypto_inhdr);
+ desc[idx++].flags = VRING_DESC_F_WRITE;
+
+ num_entry = idx;
+
+ /* save the infos to use when receiving packets */
+ dxp->crypto_op = (void *)cop;
+ dxp->ndescs = needed;
+
+ /* use a single buffer */
+ start_dp = txvq->vq_ring.desc;
+ start_dp[head_idx].addr = indirect_op_data_req_phys_addr +
+ indirect_vring_addr_offset;
+ start_dp[head_idx].len = num_entry * sizeof(struct vring_desc);
+ start_dp[head_idx].flags = VRING_DESC_F_INDIRECT;
+
+ idx = start_dp[head_idx].next;
+ txvq->vq_desc_head_idx = idx;
+ if (txvq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
+ txvq->vq_desc_tail_idx = idx;
+ txvq->vq_free_cnt = (uint16_t)(txvq->vq_free_cnt - needed);
+ vq_update_avail_ring(txvq, head_idx);
+
+ return 0;
+}
+
+static int
+virtqueue_crypto_enqueue_xmit(struct virtqueue *txvq,
+ struct rte_crypto_op *cop)
+{
+ int ret;
+
+ switch (cop->type) {
+ case RTE_CRYPTO_OP_TYPE_SYMMETRIC:
+ ret = virtqueue_crypto_sym_enqueue_xmit(txvq, cop);
+ break;
+ default:
+ VIRTIO_CRYPTO_TX_LOG_ERR("invalid crypto op type %u",
+ cop->type);
+ ret = -EFAULT;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+virtio_crypto_vring_start(struct virtqueue *vq)
+{
+ struct virtio_crypto_hw *hw = vq->hw;
+ int i, size = vq->vq_nentries;
+ struct vring *vr = &vq->vq_ring;
+ uint8_t *ring_mem = vq->vq_ring_virt_mem;
+
+ PMD_INIT_FUNC_TRACE();
+
+ vring_init(vr, size, ring_mem, VIRTIO_PCI_VRING_ALIGN);
+ vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
+ vq->vq_free_cnt = vq->vq_nentries;
+
+ /* Chain all the descriptors in the ring with an END */
+ for (i = 0; i < size - 1; i++)
+ vr->desc[i].next = (uint16_t)(i + 1);
+ vr->desc[i].next = VQ_RING_DESC_CHAIN_END;
+
+ /*
+	 * Disable the device (host) from interrupting the guest
+ */
+ virtqueue_disable_intr(vq);
+
+ /*
+	 * Share the guest physical addresses of the virtqueue with the
+	 * backend through the device's configuration registers
+ */
+ if (VTPCI_OPS(hw)->setup_queue(hw, vq) < 0) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("setup_queue failed");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void
+virtio_crypto_ctrlq_start(struct rte_cryptodev *dev)
+{
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ if (hw->cvq) {
+ virtio_crypto_vring_start(hw->cvq);
+ VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq);
+ }
+}
+
+void
+virtio_crypto_dataq_start(struct rte_cryptodev *dev)
+{
+ /*
+ * Start data vrings
+ * - Setup vring structure for data queues
+ */
+ uint16_t i;
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Start data vring. */
+ for (i = 0; i < hw->max_dataqueues; i++) {
+ virtio_crypto_vring_start(dev->data->queue_pairs[i]);
+ VIRTQUEUE_DUMP((struct virtqueue *)dev->data->queue_pairs[i]);
+ }
+}
+
+/* vring size of data queue is 1024 */
+#define VIRTIO_MBUF_BURST_SZ 1024
+
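+/*
+ * Requests and their completions share the same data virtqueue, so this
+ * "rx" burst dequeues finished crypto ops from the used ring of the queue
+ * that virtio_crypto_pkt_tx_burst() submitted them to.
+ */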
+uint16_t
+virtio_crypto_pkt_rx_burst(void *tx_queue, struct rte_crypto_op **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct virtqueue *txvq = tx_queue;
+ uint16_t nb_used, num, nb_rx;
+
+ nb_used = VIRTQUEUE_NUSED(txvq);
+
+ virtio_rmb();
+
+ num = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts);
+ num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ)
+ ? num : VIRTIO_MBUF_BURST_SZ);
+
+ if (num == 0)
+ return 0;
+
+ nb_rx = virtqueue_dequeue_burst_rx(txvq, rx_pkts, num);
+ VIRTIO_CRYPTO_RX_LOG_DBG("used:%d dequeue:%d", nb_used, num);
+
+ return nb_rx;
+}
+
+uint16_t
+virtio_crypto_pkt_tx_burst(void *tx_queue, struct rte_crypto_op **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct virtqueue *txvq;
+ uint16_t nb_tx;
+ int error;
+
+ if (unlikely(nb_pkts < 1))
+ return nb_pkts;
+ if (unlikely(tx_queue == NULL)) {
+ VIRTIO_CRYPTO_TX_LOG_ERR("tx_queue is NULL");
+ return 0;
+ }
+ txvq = tx_queue;
+
+ VIRTIO_CRYPTO_TX_LOG_DBG("%d packets to xmit", nb_pkts);
+
+ for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+ struct rte_mbuf *txm = tx_pkts[nb_tx]->sym->m_src;
+		/* nb_segs is always 1 for virtio crypto */
+ int need = txm->nb_segs - txvq->vq_free_cnt;
+
+ /*
+		 * A positive value indicates there is not enough space left
+		 * in the vring descriptors
+ */
+ if (unlikely(need > 0)) {
+ /*
+			 * try again because the receive path may have freed
+			 * some space in the meantime
+ */
+ need = txm->nb_segs - txvq->vq_free_cnt;
+ if (unlikely(need > 0)) {
+ VIRTIO_CRYPTO_TX_LOG_DBG("No free tx "
+ "descriptors to transmit");
+ break;
+ }
+ }
+
+ txvq->packets_sent_total++;
+
+ /* Enqueue Packet buffers */
+ error = virtqueue_crypto_enqueue_xmit(txvq, tx_pkts[nb_tx]);
+ if (unlikely(error)) {
+			if (error == -ENOSPC)
+ VIRTIO_CRYPTO_TX_LOG_ERR(
+ "virtqueue_enqueue Free count = 0");
+			else if (error == -EMSGSIZE)
+ VIRTIO_CRYPTO_TX_LOG_ERR(
+ "virtqueue_enqueue Free count < 1");
+ else
+ VIRTIO_CRYPTO_TX_LOG_ERR(
+ "virtqueue_enqueue error: %d", error);
+ txvq->packets_sent_failed++;
+ break;
+ }
+ }
+
+ if (likely(nb_tx)) {
+ vq_update_avail_idx(txvq);
+
+ if (unlikely(virtqueue_kick_prepare(txvq))) {
+ virtqueue_notify(txvq);
+ VIRTIO_CRYPTO_TX_LOG_DBG("Notified backend after xmit");
+ }
+ }
+
+ return nb_tx;
+}
diff --git a/src/spdk/dpdk/drivers/crypto/virtio/virtqueue.c b/src/spdk/dpdk/drivers/crypto/virtio/virtqueue.c
new file mode 100644
index 00000000..fd8be581
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/virtio/virtqueue.c
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#include <stdint.h>
+
+#include <rte_mbuf.h>
+#include <rte_crypto.h>
+#include <rte_malloc.h>
+
+#include "virtqueue.h"
+
+void
+virtqueue_disable_intr(struct virtqueue *vq)
+{
+ /*
+	 * Set VRING_AVAIL_F_NO_INTERRUPT to hint the host not to
+	 * interrupt when it consumes packets.
+	 * Note: this is only a hint and the host may ignore it.
+ */
+ vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+}
+
+void
+virtqueue_detatch_unused(struct virtqueue *vq)
+{
+ struct rte_crypto_op *cop = NULL;
+
+ int idx;
+
+ if (vq != NULL)
+ for (idx = 0; idx < vq->vq_nentries; idx++) {
+ cop = vq->vq_descx[idx].crypto_op;
+ if (cop) {
+ if (cop->sym->m_src)
+ rte_pktmbuf_free(cop->sym->m_src);
+ if (cop->sym->m_dst)
+ rte_pktmbuf_free(cop->sym->m_dst);
+ rte_crypto_op_free(cop);
+ vq->vq_descx[idx].crypto_op = NULL;
+ }
+ }
+}
diff --git a/src/spdk/dpdk/drivers/crypto/virtio/virtqueue.h b/src/spdk/dpdk/drivers/crypto/virtio/virtqueue.h
new file mode 100644
index 00000000..bf10c657
--- /dev/null
+++ b/src/spdk/dpdk/drivers/crypto/virtio/virtqueue.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#ifndef _VIRTQUEUE_H_
+#define _VIRTQUEUE_H_
+
+#include <stdint.h>
+
+#include <rte_atomic.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_mempool.h>
+
+#include "virtio_pci.h"
+#include "virtio_ring.h"
+#include "virtio_logs.h"
+#include "virtio_crypto.h"
+
+struct rte_mbuf;
+
+/*
+ * Per virtio_config.h in Linux.
+ * For virtio_pci on SMP, we don't need to order with respect to MMIO
+ * accesses through relaxed memory I/O windows, so smp_mb() et al are
+ * sufficient.
+ *
+ */
+#define virtio_mb() rte_smp_mb()
+#define virtio_rmb() rte_smp_rmb()
+#define virtio_wmb() rte_smp_wmb()
+
+#define VIRTQUEUE_MAX_NAME_SZ 32
+
+enum { VTCRYPTO_DATAQ = 0, VTCRYPTO_CTRLQ = 1 };
+
+/**
+ * The maximum virtqueue size is 2^15. Use that value as the end of
+ * descriptor chain terminator since it will never be a valid index
+ * in the descriptor table. This is used to verify we are correctly
+ * handling vq_free_cnt.
+ */
+#define VQ_RING_DESC_CHAIN_END 32768
+
+struct vq_desc_extra {
+ void *crypto_op;
+ void *cookie;
+ uint16_t ndescs;
+};
+
+struct virtqueue {
+ /**< virtio_crypto_hw structure pointer. */
+ struct virtio_crypto_hw *hw;
+ /**< mem zone to populate RX ring. */
+ const struct rte_memzone *mz;
+ /**< memzone to populate hdr and request. */
+ struct rte_mempool *mpool;
+ uint8_t dev_id; /**< Device identifier. */
+ uint16_t vq_queue_index; /**< PCI queue index */
+
+	void *vq_ring_virt_mem;		/**< linear address of vring */
+ unsigned int vq_ring_size;
+ phys_addr_t vq_ring_mem; /**< physical address of vring */
+
+ struct vring vq_ring; /**< vring keeping desc, used and avail */
+ uint16_t vq_free_cnt; /**< num of desc available */
+ uint16_t vq_nentries; /**< vring desc numbers */
+
+ /**
+ * Head of the free chain in the descriptor table. If
+ * there are no free descriptors, this will be set to
+ * VQ_RING_DESC_CHAIN_END.
+ */
+ uint16_t vq_desc_head_idx;
+ uint16_t vq_desc_tail_idx;
+ /**
+ * Last consumed descriptor in the used table,
+ * trails vq_ring.used->idx.
+ */
+ uint16_t vq_used_cons_idx;
+ uint16_t vq_avail_idx;
+
+ /* Statistics */
+ uint64_t packets_sent_total;
+ uint64_t packets_sent_failed;
+ uint64_t packets_received_total;
+ uint64_t packets_received_failed;
+
+ uint16_t *notify_addr;
+
+ struct vq_desc_extra vq_descx[0];
+};
+
+/**
+ * Tell the backend not to interrupt us.
+ */
+void virtqueue_disable_intr(struct virtqueue *vq);
+
+/**
+ * Get all mbufs to be freed.
+ */
+void virtqueue_detatch_unused(struct virtqueue *vq);
+
+static inline int
+virtqueue_full(const struct virtqueue *vq)
+{
+ return vq->vq_free_cnt == 0;
+}
+
+#define VIRTQUEUE_NUSED(vq) \
+ ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))
+
+static inline void
+vq_update_avail_idx(struct virtqueue *vq)
+{
+ virtio_wmb();
+ vq->vq_ring.avail->idx = vq->vq_avail_idx;
+}
+
+static inline void
+vq_update_avail_ring(struct virtqueue *vq, uint16_t desc_idx)
+{
+ uint16_t avail_idx;
+ /*
+ * Place the head of the descriptor chain into the next slot and make
+ * it usable to the host. The chain is made available now rather than
+ * deferring to virtqueue_notify() in the hopes that if the host is
+ * currently running on another CPU, we can keep it processing the new
+ * descriptor.
+ */
+ avail_idx = (uint16_t)(vq->vq_avail_idx & (vq->vq_nentries - 1));
+ if (unlikely(vq->vq_ring.avail->ring[avail_idx] != desc_idx))
+ vq->vq_ring.avail->ring[avail_idx] = desc_idx;
+ vq->vq_avail_idx++;
+}
+
+static inline int
+virtqueue_kick_prepare(struct virtqueue *vq)
+{
+ return !(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY);
+}
+
+static inline void
+virtqueue_notify(struct virtqueue *vq)
+{
+ /*
+ * Ensure updated avail->idx is visible to host.
+	 * For virtio on IA, the notification is done through an I/O port
+	 * operation, which is itself a serializing instruction.
+ */
+ VTPCI_OPS(vq->hw)->notify_queue(vq->hw, vq);
+}
+
+/**
+ * Dump virtqueue internal structures, for debug purpose only.
+ */
+#define VIRTQUEUE_DUMP(vq) do { \
+ uint16_t used_idx, nused; \
+ used_idx = (vq)->vq_ring.used->idx; \
+ nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \
+ VIRTIO_CRYPTO_INIT_LOG_DBG(\
+ "VQ: - size=%d; free=%d; used=%d; desc_head_idx=%d;" \
+ " avail.idx=%d; used_cons_idx=%d; used.idx=%d;" \
+ " avail.flags=0x%x; used.flags=0x%x", \
+ (vq)->vq_nentries, (vq)->vq_free_cnt, nused, \
+ (vq)->vq_desc_head_idx, (vq)->vq_ring.avail->idx, \
+ (vq)->vq_used_cons_idx, (vq)->vq_ring.used->idx, \
+ (vq)->vq_ring.avail->flags, (vq)->vq_ring.used->flags); \
+} while (0)
+
+#endif /* _VIRTQUEUE_H_ */