summaryrefslogtreecommitdiffstats
path: root/src/spdk/test/unit/lib/bdev/crypto.c
diff options
context:
space:
mode:
Diffstat (limited to 'src/spdk/test/unit/lib/bdev/crypto.c')
-rw-r--r--src/spdk/test/unit/lib/bdev/crypto.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/bdev/crypto.c/Makefile41
-rw-r--r--src/spdk/test/unit/lib/bdev/crypto.c/crypto_ut.c908
-rw-r--r--src/spdk/test/unit/lib/bdev/crypto.c/rte_crypto.h95
-rw-r--r--src/spdk/test/unit/lib/bdev/crypto.c/rte_cryptodev.h153
-rw-r--r--src/spdk/test/unit/lib/bdev/crypto.c/rte_mbuf.h148
-rw-r--r--src/spdk/test/unit/lib/bdev/crypto.c/rte_mempool.h145
7 files changed, 1491 insertions, 0 deletions
diff --git a/src/spdk/test/unit/lib/bdev/crypto.c/.gitignore b/src/spdk/test/unit/lib/bdev/crypto.c/.gitignore
new file mode 100644
index 00000000..b2777562
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/crypto.c/.gitignore
@@ -0,0 +1 @@
+crypto_ut
diff --git a/src/spdk/test/unit/lib/bdev/crypto.c/Makefile b/src/spdk/test/unit/lib/bdev/crypto.c/Makefile
new file mode 100644
index 00000000..3241464b
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/crypto.c/Makefile
@@ -0,0 +1,41 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+
+TEST_FILE = crypto_ut.c
+CFLAGS += $(ENV_CFLAGS)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/bdev/crypto.c/crypto_ut.c b/src/spdk/test/unit/lib/bdev/crypto.c/crypto_ut.c
new file mode 100644
index 00000000..f01aba19
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/crypto.c/crypto_ut.c
@@ -0,0 +1,908 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "common/lib/test_env.c"
+#include "spdk_internal/mock.h"
+#include "unit/lib/json_mock.c"
+
+/* these rte_ headers are our local copies of the DPDK headers hacked to mock some functions
+ * included in them that can't be done with our mock library.
+ */
+#include "rte_crypto.h"
+#include "rte_cryptodev.h"
+DEFINE_STUB_V(rte_crypto_op_free, (struct rte_crypto_op *op));
+#include "bdev/crypto/vbdev_crypto.c"
+
+/* SPDK stubs: mock implementations of every bdev/conf/env symbol that the
+ * linked-in vbdev_crypto.c references, so the test binary links without the
+ * real libraries. Return values can be overridden per-test with
+ * MOCK_SET()/MOCK_CLEAR().
+ */
+DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *,
+ (struct spdk_conf *cp, const char *name), NULL);
+DEFINE_STUB(spdk_conf_section_get_nval, char *,
+ (struct spdk_conf_section *sp, const char *key, int idx), NULL);
+DEFINE_STUB(spdk_conf_section_get_nmval, char *,
+ (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL);
+
+DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));
+DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *g_bdev_io));
+DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev,
+ enum spdk_bdev_io_type io_type), 0);
+DEFINE_STUB_V(spdk_bdev_module_release_bdev, (struct spdk_bdev *bdev));
+DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
+DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), 0);
+DEFINE_STUB(spdk_env_get_current_core, uint32_t, (void), 0);
+DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc), 0);
+DEFINE_STUB_V(spdk_bdev_unregister, (struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn,
+ void *cb_arg));
+DEFINE_STUB(spdk_bdev_open, int, (struct spdk_bdev *bdev, bool write,
+ spdk_bdev_remove_cb_t remove_cb,
+ void *remove_ctx, struct spdk_bdev_desc **_desc), 0);
+DEFINE_STUB(spdk_bdev_module_claim_bdev, int, (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
+ struct spdk_bdev_module *module), 0);
+DEFINE_STUB_V(spdk_bdev_module_examine_done, (struct spdk_bdev_module *module));
+DEFINE_STUB(spdk_vbdev_register, int, (struct spdk_bdev *vbdev, struct spdk_bdev **base_bdevs,
+ int base_bdev_count), 0);
+DEFINE_STUB(spdk_bdev_get_by_name, struct spdk_bdev *, (const char *bdev_name), NULL);
+DEFINE_STUB(spdk_env_get_socket_id, uint32_t, (uint32_t core), 0);
+
+/* DPDK stubs: mock the rte_ APIs used by the vbdev so no real crypto
+ * devices or EAL initialization are required by the tests.
+ */
+DEFINE_STUB(rte_cryptodev_count, uint8_t, (void), 0);
+DEFINE_STUB(rte_eal_get_configuration, struct rte_config *, (void), NULL);
+DEFINE_STUB_V(rte_mempool_free, (struct rte_mempool *mp));
+DEFINE_STUB(rte_socket_id, unsigned, (void), 0);
+DEFINE_STUB(rte_crypto_op_pool_create, struct rte_mempool *,
+ (const char *name, enum rte_crypto_op_type type, unsigned nb_elts,
+ unsigned cache_size, uint16_t priv_size, int socket_id), (struct rte_mempool *)1);
+DEFINE_STUB(rte_cryptodev_device_count_by_driver, uint8_t, (uint8_t driver_id), 0);
+DEFINE_STUB(rte_cryptodev_socket_id, int, (uint8_t dev_id), 0);
+DEFINE_STUB(rte_cryptodev_configure, int, (uint8_t dev_id, struct rte_cryptodev_config *config), 0);
+DEFINE_STUB(rte_cryptodev_queue_pair_setup, int, (uint8_t dev_id, uint16_t queue_pair_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id, struct rte_mempool *session_pool), 0);
+DEFINE_STUB(rte_cryptodev_start, int, (uint8_t dev_id), 0);
+DEFINE_STUB_V(rte_cryptodev_stop, (uint8_t dev_id));
+DEFINE_STUB(rte_cryptodev_sym_session_create, struct rte_cryptodev_sym_session *,
+ (struct rte_mempool *mempool), (struct rte_cryptodev_sym_session *)1);
+DEFINE_STUB(rte_cryptodev_sym_session_clear, int, (uint8_t dev_id,
+ struct rte_cryptodev_sym_session *sess), 0);
+DEFINE_STUB(rte_cryptodev_sym_session_free, int, (struct rte_cryptodev_sym_session *sess), 0);
+DEFINE_STUB(rte_cryptodev_sym_session_init, int, (uint8_t dev_id,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_crypto_sym_xform *xforms, struct rte_mempool *mempool), 0);
+DEFINE_STUB(rte_vdev_init, int, (const char *name, const char *args), 0);
+/* rte_panic() must never return; abort so any test that trips it fails loudly. */
+void __attribute__((noreturn)) __rte_panic(const char *funcname, const char *format, ...)
+{
+ abort();
+}
+/* Definitions required by the local rte_ mock headers / DPDK inline code. */
+struct rte_mempool_ops_table rte_mempool_ops_table;
+struct rte_cryptodev *rte_cryptodevs;
+__thread unsigned per_lcore__lcore_id = 0;
+
+/* global vars and setup/cleanup functions used for all test functions */
+struct spdk_bdev_io *g_bdev_io;
+struct crypto_bdev_io *g_io_ctx;
+struct crypto_io_channel *g_crypto_ch;
+struct spdk_io_channel *g_io_ch;
+struct vbdev_dev g_device;
+struct vbdev_crypto g_crypto_bdev;
+struct rte_config *g_test_config;
+struct device_qp g_dev_qp;
+
+/* Upper bound on crypto ops per test IO; sizes the op arrays below. */
+#define MAX_TEST_BLOCKS 8192
+struct rte_crypto_op *g_test_crypto_ops[MAX_TEST_BLOCKS];
+struct rte_crypto_op *g_test_dequeued_ops[MAX_TEST_BLOCKS];
+struct rte_crypto_op *g_test_dev_full_ops[MAX_TEST_BLOCKS];
+
+/* These globals are externs in our local rte_ header files so we can control
+ * specific functions for mocking.
+ */
+uint16_t g_dequeue_mock;
+uint16_t g_enqueue_mock;
+unsigned ut_rte_crypto_op_bulk_alloc;
+int ut_rte_crypto_op_attach_sym_session = 0;
+
+/* Hand-written mock: report whatever queue-pair count the test configured. */
+int ut_rte_cryptodev_info_get = 0;
+bool ut_rte_cryptodev_info_get_mocked = false;
+void
+rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
+{
+ dev_info->max_nb_queue_pairs = ut_rte_cryptodev_info_get;
+}
+
+/* Mock: echo dev_id back as the private session size so tests can vary it. */
+unsigned int
+rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
+{
+ return (unsigned int)dev_id;
+}
+
+/* Mock: invoke the get-buf callback immediately with the global channel/IO. */
+void
+spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
+{
+ cb(g_io_ch, g_bdev_io);
+}
+
+/* Mock these functions to call the completion callback synchronously (success
+ * iff the corresponding ut_ control variable is 0) and then return the value
+ * the test configured via MOCK_SET().
+ */
+int ut_spdk_bdev_readv_blocks = 0;
+bool ut_spdk_bdev_readv_blocks_mocked = false;
+int
+spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *iov, int iovcnt,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ cb(g_bdev_io, !ut_spdk_bdev_readv_blocks, cb_arg);
+ return ut_spdk_bdev_readv_blocks;
+}
+
+int ut_spdk_bdev_writev_blocks = 0;
+bool ut_spdk_bdev_writev_blocks_mocked = false;
+int
+spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *iov, int iovcnt,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ cb(g_bdev_io, !ut_spdk_bdev_writev_blocks, cb_arg);
+ return ut_spdk_bdev_writev_blocks;
+}
+
+int ut_spdk_bdev_unmap_blocks = 0;
+bool ut_spdk_bdev_unmap_blocks_mocked = false;
+int
+spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ cb(g_bdev_io, !ut_spdk_bdev_unmap_blocks, cb_arg);
+ return ut_spdk_bdev_unmap_blocks;
+}
+
+int ut_spdk_bdev_flush_blocks = 0;
+bool ut_spdk_bdev_flush_blocks_mocked = false;
+int
+spdk_bdev_flush_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
+ void *cb_arg)
+{
+ cb(g_bdev_io, !ut_spdk_bdev_flush_blocks, cb_arg);
+ return ut_spdk_bdev_flush_blocks;
+}
+
+int ut_spdk_bdev_reset = 0;
+bool ut_spdk_bdev_reset_mocked = false;
+int
+spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ cb(g_bdev_io, !ut_spdk_bdev_reset, cb_arg);
+ return ut_spdk_bdev_reset;
+}
+
+/* Mock completion: record the status on the IO and flag that it ran so
+ * tests can assert on both.
+ */
+bool g_completion_called = false;
+void
+spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
+{
+ bdev_io->internal.status = status;
+ g_completion_called = true;
+}
+
+/* Used in testing device full condition: capture the enqueued ops into
+ * g_test_dev_full_ops for later assertion and return the mocked count
+ * (g_enqueue_mock), which may be < nb_ops to simulate a full device.
+ */
+static inline uint16_t
+rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ int i;
+
+ CU_ASSERT(nb_ops > 0);
+
+ for (i = 0; i < nb_ops; i++) {
+ /* Use this empty (til now) array of pointers to store
+ * enqueued operations for assertion in dev_full test.
+ */
+ g_test_dev_full_ops[i] = *ops++;
+ }
+
+ return g_enqueue_mock;
+}
+
+/* This is pretty ugly but in order to complete an IO via the
+ * poller in the submit path, we need to first call to this func
+ * to return the dequeued value and also decrement it. On the subsequent
+ * call it needs to return 0 to indicate to the caller that there are
+ * no more IOs to drain.
+ */
+int g_test_overflow = 0;
+static inline uint16_t
+rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ CU_ASSERT(nb_ops > 0);
+
+ /* A crypto device can be full on enqueue, the driver is designed to drain
+ * the device at the time by calling the poller until it's empty, then
+ * submitting the remaining crypto ops.
+ */
+ if (g_test_overflow) {
+ if (g_dequeue_mock == 0) {
+ return 0;
+ }
+ /* Hand back one successful op per call until the mock count drains. */
+ *ops = g_test_crypto_ops[g_enqueue_mock];
+ (*ops)->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ g_dequeue_mock -= 1;
+ }
+ /* NOTE(review): in the non-overflow case this returns g_dequeue_mock + 1
+ * without filling *ops — callers presumably ignore the op array then;
+ * confirm against the poller's use of the return value.
+ */
+ return (g_dequeue_mock + 1);
+}
+
+/* Instead of allocating real memory, assign the allocations to our
+ * test array for assertion in tests. Returns the mocked alloc count,
+ * which may be 0 to simulate allocation failure.
+ */
+static inline unsigned
+rte_crypto_op_bulk_alloc(struct rte_mempool *mempool,
+ enum rte_crypto_op_type type,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ int i;
+
+ for (i = 0; i < nb_ops; i++) {
+ *ops++ = g_test_crypto_ops[i];
+ }
+ return ut_rte_crypto_op_bulk_alloc;
+}
+
+/* No-op: the ops above were never really allocated, nothing to return. */
+static __rte_always_inline void
+rte_mempool_put_bulk(struct rte_mempool *mp, void *const *obj_table,
+ unsigned int n)
+{
+ return;
+}
+
+/* No private area on our fake mempools. */
+static inline void *rte_mempool_get_priv(struct rte_mempool *mp)
+{
+ return NULL;
+}
+
+
+/* Mock: report the configurable attach result (nonzero simulates failure). */
+static inline int
+rte_crypto_op_attach_sym_session(struct rte_crypto_op *op,
+ struct rte_cryptodev_sym_session *sess)
+{
+ return ut_rte_crypto_op_attach_sym_session;
+}
+
+/* Global setup for all tests that share a bunch of preparation...
+ * Builds the fake bdev IO, IO channel, crypto bdev/channel wiring, a real
+ * mbuf mempool (for error-path coverage) and calloc'd crypto op arrays.
+ * NOTE(review): the calloc/spdk_mempool_create results are not checked for
+ * NULL before use — acceptable for a unit test, but a failure here would
+ * crash rather than report; consider asserting on them.
+ */
+static int
+test_setup(void)
+{
+ int i;
+
+ /* Prepare essential variables for test routines */
+ g_bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct crypto_bdev_io));
+ g_bdev_io->u.bdev.iovs = calloc(1, sizeof(struct iovec) * 128);
+ g_bdev_io->bdev = &g_crypto_bdev.crypto_bdev;
+ g_io_ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct crypto_io_channel));
+ g_crypto_ch = (struct crypto_io_channel *)((uint8_t *)g_io_ch + sizeof(struct spdk_io_channel));
+ g_io_ctx = (struct crypto_bdev_io *)g_bdev_io->driver_ctx;
+ memset(&g_device, 0, sizeof(struct vbdev_dev));
+ memset(&g_crypto_bdev, 0, sizeof(struct vbdev_crypto));
+ g_dev_qp.device = &g_device;
+ g_io_ctx->crypto_ch = g_crypto_ch;
+ g_io_ctx->crypto_bdev = &g_crypto_bdev;
+ g_crypto_ch->device_qp = &g_dev_qp;
+ g_test_config = calloc(1, sizeof(struct rte_config));
+ g_test_config->lcore_count = 1;
+
+ /* Allocate a real mbuf pool so we can test error paths */
+ g_mbuf_mp = spdk_mempool_create("mbuf_mp", NUM_MBUFS, sizeof(struct rte_mbuf),
+ SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
+ SPDK_ENV_SOCKET_ID_ANY);
+
+ /* Instead of allocating real rte mempools for these, it's easier and provides the
+ * same coverage just calloc them here.
+ */
+ for (i = 0; i < MAX_TEST_BLOCKS; i++) {
+ g_test_crypto_ops[i] = calloc(1, sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op));
+ g_test_dequeued_ops[i] = calloc(1, sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op));
+ }
+ return 0;
+}
+
+/* Global teardown for all tests: release everything test_setup() allocated. */
+static int
+test_cleanup(void)
+{
+ int i;
+
+ free(g_test_config);
+ spdk_mempool_free(g_mbuf_mp);
+ for (i = 0; i < MAX_TEST_BLOCKS; i++) {
+ free(g_test_crypto_ops[i]);
+ free(g_test_dequeued_ops[i]);
+ }
+ free(g_bdev_io->u.bdev.iovs);
+ free(g_bdev_io);
+ free(g_io_ch);
+ return 0;
+}
+
+/* Force each allocation/session call in the submit path to fail in turn and
+ * assert the IO completes with SPDK_BDEV_IO_STATUS_FAILED every time.
+ */
+static void
+test_error_paths(void)
+{
+ /* Single element block size write, just to test error paths
+ * in vbdev_crypto_submit_request().
+ */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->u.bdev.iovcnt = 1;
+ g_bdev_io->u.bdev.num_blocks = 1;
+ g_bdev_io->u.bdev.iovs[0].iov_len = 512;
+ g_crypto_bdev.crypto_bdev.blocklen = 512;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
+ g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;
+
+ /* test failure of spdk_mempool_get_bulk() */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ MOCK_SET(spdk_mempool_get, NULL);
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+
+ /* same thing but switch to reads to test error path in _crypto_complete_io() */
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ /* Now with the read_blocks failing */
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ MOCK_SET(spdk_bdev_readv_blocks, -1);
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ MOCK_SET(spdk_bdev_readv_blocks, 0);
+ MOCK_CLEAR(spdk_mempool_get);
+
+ /* test failure of rte_crypto_op_bulk_alloc() */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ ut_rte_crypto_op_bulk_alloc = 0;
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ ut_rte_crypto_op_bulk_alloc = 1;
+
+ /* test failure of rte_cryptodev_sym_session_create() */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ MOCK_SET(rte_cryptodev_sym_session_create, NULL);
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ MOCK_SET(rte_cryptodev_sym_session_create, (struct rte_cryptodev_sym_session *)1);
+
+ /* test failure of rte_cryptodev_sym_session_init() */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ MOCK_SET(rte_cryptodev_sym_session_init, -1);
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ MOCK_SET(rte_cryptodev_sym_session_init, 0);
+
+ /* test failure of rte_crypto_op_attach_sym_session() */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ ut_rte_crypto_op_attach_sym_session = -1;
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ ut_rte_crypto_op_attach_sym_session = 0;
+}
+
+/* Happy-path single 512B block write: assert one ENCRYPT op is built with
+ * correct src/dst mbufs, then release the resources the submit path took.
+ */
+static void
+test_simple_write(void)
+{
+ /* Single element block size write */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->u.bdev.iovcnt = 1;
+ g_bdev_io->u.bdev.num_blocks = 1;
+ g_bdev_io->u.bdev.offset_blocks = 0;
+ g_bdev_io->u.bdev.iovs[0].iov_len = 512;
+ g_bdev_io->u.bdev.iovs[0].iov_base = &test_simple_write;
+ g_crypto_bdev.crypto_bdev.blocklen = 512;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
+ g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;
+
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 1);
+ CU_ASSERT(g_io_ctx->crypto_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT);
+ CU_ASSERT(g_io_ctx->cry_iov.iov_len == 512);
+ CU_ASSERT(g_io_ctx->cry_iov.iov_base != NULL);
+ CU_ASSERT(g_io_ctx->cry_offset_blocks == 0);
+ CU_ASSERT(g_io_ctx->cry_num_blocks == 1);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == &test_simple_write);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == 512);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->userdata == g_bdev_io);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->buf_addr != NULL);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->data_len == 512);
+
+ /* Release what the submit path allocated/took from the pools. */
+ spdk_dma_free(g_io_ctx->cry_iov.iov_base);
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_src);
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_dst);
+}
+
+/* Happy-path single 512B block read: assert one DECRYPT op is built in-place
+ * (no dst mbuf), then return the src mbuf to the pool.
+ */
+static void
+test_simple_read(void)
+{
+ /* Single element block size read */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->u.bdev.iovcnt = 1;
+ g_bdev_io->u.bdev.num_blocks = 1;
+ g_bdev_io->u.bdev.iovs[0].iov_len = 512;
+ g_bdev_io->u.bdev.iovs[0].iov_base = &test_simple_read;
+ g_crypto_bdev.crypto_bdev.blocklen = 512;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
+ g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;
+
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 1);
+ CU_ASSERT(g_io_ctx->crypto_op == RTE_CRYPTO_CIPHER_OP_DECRYPT);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == &test_simple_read);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == 512);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->userdata == g_bdev_io);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst == NULL);
+
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_src);
+}
+
+/* Maximum-size IO (CRYPTO_MAX_IO bytes) split into 512B blocks: verify one
+ * crypto op per block for both the read (decrypt) and write (encrypt) paths.
+ */
+static void
+test_large_rw(void)
+{
+ unsigned block_len = 512;
+ unsigned num_blocks = CRYPTO_MAX_IO / block_len;
+ unsigned io_len = block_len * num_blocks;
+ unsigned i;
+
+ /* Multi block size read, multi-element */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->u.bdev.iovcnt = 1;
+ g_bdev_io->u.bdev.num_blocks = num_blocks;
+ g_bdev_io->u.bdev.iovs[0].iov_len = io_len;
+ g_bdev_io->u.bdev.iovs[0].iov_base = &test_large_rw;
+ g_crypto_bdev.crypto_bdev.blocklen = block_len;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
+ g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;
+
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_io_ctx->cryop_cnt_remaining == (int)num_blocks);
+ CU_ASSERT(g_io_ctx->crypto_op == RTE_CRYPTO_CIPHER_OP_DECRYPT);
+
+ for (i = 0; i < num_blocks; i++) {
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_large_rw + (i * block_len));
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->userdata == g_bdev_io);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
+ }
+
+ /* Multi block size write, multi-element */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->u.bdev.iovcnt = 1;
+ g_bdev_io->u.bdev.num_blocks = num_blocks;
+ g_bdev_io->u.bdev.iovs[0].iov_len = io_len;
+ g_bdev_io->u.bdev.iovs[0].iov_base = &test_large_rw;
+ g_crypto_bdev.crypto_bdev.blocklen = block_len;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
+ g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;
+
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_io_ctx->cryop_cnt_remaining == (int)num_blocks);
+ CU_ASSERT(g_io_ctx->crypto_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT);
+
+ for (i = 0; i < num_blocks; i++) {
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_large_rw + (i * block_len));
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->userdata == g_bdev_io);
+ CU_ASSERT(g_io_ctx->cry_iov.iov_len == io_len);
+ CU_ASSERT(g_io_ctx->cry_iov.iov_base != NULL);
+ CU_ASSERT(g_io_ctx->cry_offset_blocks == 0);
+ CU_ASSERT(g_io_ctx->cry_num_blocks == num_blocks);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->buf_addr != NULL);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->data_len == block_len);
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_dst);
+ }
+ spdk_dma_free(g_io_ctx->cry_iov.iov_base);
+}
+
+/* Simulate a full crypto device: enqueue accepts only 1 of 2 ops, forcing the
+ * submit path to drain via the poller (see the mocked dequeue_burst) before
+ * enqueueing the remainder.
+ */
+static void
+test_dev_full(void)
+{
+ unsigned block_len = 512;
+ unsigned num_blocks = 2;
+ unsigned io_len = block_len * num_blocks;
+ unsigned i;
+
+ g_test_overflow = 1;
+
+ /* Multi block size read, multi-element */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->u.bdev.iovcnt = 1;
+ g_bdev_io->u.bdev.num_blocks = num_blocks;
+ g_bdev_io->u.bdev.iovs[0].iov_len = io_len;
+ g_bdev_io->u.bdev.iovs[0].iov_base = &test_dev_full;
+ g_crypto_bdev.crypto_bdev.blocklen = block_len;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
+ g_enqueue_mock = g_dequeue_mock = 1;
+ ut_rte_crypto_op_bulk_alloc = num_blocks;
+
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ /* this test only completes one of the 2 IOs (in the drain path) */
+ CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 1);
+ CU_ASSERT(g_io_ctx->crypto_op == RTE_CRYPTO_CIPHER_OP_DECRYPT);
+
+ for (i = 0; i < num_blocks; i++) {
+ /* One of the src_mbufs was freed because of the device full condition so
+ * we can't assert its value here.
+ */
+ CU_ASSERT(g_test_dev_full_ops[i]->sym->cipher.data.length == block_len);
+ CU_ASSERT(g_test_dev_full_ops[i]->sym->cipher.data.offset == 0);
+ /* NOTE(review): this assertion compares m_src with itself and can never
+ * fail — presumably it was meant to compare against a known op array;
+ * confirm the intended operand.
+ */
+ CU_ASSERT(g_test_dev_full_ops[i]->sym->m_src == g_test_dev_full_ops[i]->sym->m_src);
+ CU_ASSERT(g_test_dev_full_ops[i]->sym->m_dst == NULL);
+ }
+
+ /* Only one of the 2 blocks in the test was freed on completion by design, so
+ * we need to free the other one here.
+ */
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_src);
+ g_test_overflow = 0;
+}
+
+/* IOs whose iovec boundaries do not line up with block boundaries: verify the
+ * splitter still produces exactly one correctly-sized crypto op per block for
+ * both read and write.
+ */
+static void
+test_crazy_rw(void)
+{
+ unsigned block_len = 512;
+ int num_blocks = 4;
+ int i;
+
+ /* Multi block size read, single element, strange IOV makeup */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->u.bdev.iovcnt = 3;
+ g_bdev_io->u.bdev.num_blocks = num_blocks;
+ g_bdev_io->u.bdev.iovs[0].iov_len = 512;
+ g_bdev_io->u.bdev.iovs[0].iov_base = &test_crazy_rw;
+ g_bdev_io->u.bdev.iovs[1].iov_len = 1024;
+ g_bdev_io->u.bdev.iovs[1].iov_base = &test_crazy_rw + 512;
+ g_bdev_io->u.bdev.iovs[2].iov_len = 512;
+ g_bdev_io->u.bdev.iovs[2].iov_base = &test_crazy_rw + 512 + 1024;
+
+ g_crypto_bdev.crypto_bdev.blocklen = block_len;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
+ g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;
+
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_io_ctx->cryop_cnt_remaining == num_blocks);
+ CU_ASSERT(g_io_ctx->crypto_op == RTE_CRYPTO_CIPHER_OP_DECRYPT);
+
+ for (i = 0; i < num_blocks; i++) {
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_crazy_rw + (i * block_len));
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->userdata == g_bdev_io);
+ /* NOTE(review): self-comparison — always true; confirm intended operand. */
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src == g_test_crypto_ops[i]->sym->m_src);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
+ }
+
+ /* Multi block size write, single element strange IOV makeup */
+ num_blocks = 8;
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->u.bdev.iovcnt = 4;
+ g_bdev_io->u.bdev.num_blocks = num_blocks;
+ g_bdev_io->u.bdev.iovs[0].iov_len = 2048;
+ g_bdev_io->u.bdev.iovs[0].iov_base = &test_crazy_rw;
+ g_bdev_io->u.bdev.iovs[1].iov_len = 512;
+ g_bdev_io->u.bdev.iovs[1].iov_base = &test_crazy_rw + 2048;
+ g_bdev_io->u.bdev.iovs[2].iov_len = 512;
+ g_bdev_io->u.bdev.iovs[2].iov_base = &test_crazy_rw + 2048 + 512;
+ g_bdev_io->u.bdev.iovs[3].iov_len = 1024;
+ g_bdev_io->u.bdev.iovs[3].iov_base = &test_crazy_rw + 2048 + 512 + 512;
+
+ g_crypto_bdev.crypto_bdev.blocklen = block_len;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
+ g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;
+
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_io_ctx->cryop_cnt_remaining == num_blocks);
+ CU_ASSERT(g_io_ctx->crypto_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT);
+
+ for (i = 0; i < num_blocks; i++) {
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_crazy_rw + (i * block_len));
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->userdata == g_bdev_io);
+ /* NOTE(review): the next two assertions compare fields with themselves
+ * and can never fail; confirm what they were meant to check.
+ */
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src == g_test_crypto_ops[i]->sym->m_src);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == g_test_crypto_ops[i]->sym->m_dst);
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_dst);
+ }
+ spdk_dma_free(g_io_ctx->cry_iov.iov_base);
+}
+
+/* IO types the vbdev passes straight through (unmap/flush/reset): verify both
+ * the success and failure completions, and that WRITE_ZEROES is rejected.
+ */
+static void
+test_passthru(void)
+{
+ /* Make sure these follow our completion callback, test success & fail. */
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_UNMAP;
+ MOCK_SET(spdk_bdev_unmap_blocks, 0);
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ MOCK_SET(spdk_bdev_unmap_blocks, -1);
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ MOCK_CLEAR(spdk_bdev_unmap_blocks);
+
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_FLUSH;
+ MOCK_SET(spdk_bdev_flush_blocks, 0);
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ MOCK_SET(spdk_bdev_flush_blocks, -1);
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ MOCK_CLEAR(spdk_bdev_flush_blocks);
+
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_RESET;
+ MOCK_SET(spdk_bdev_reset, 0);
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ MOCK_SET(spdk_bdev_reset, -1);
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ MOCK_CLEAR(spdk_bdev_reset);
+
+ /* We should never get a WZ command, we report that we don't support it. */
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE_ZEROES;
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+}
+
+/* Walk every error path of vbdev_crypto_init_crypto_drivers() by mocking the
+ * DPDK entry points it calls, then finish with the happy path. Because each
+ * failed run may have allocated and freed the module's global pools,
+ * g_mbuf_mp/g_session_mp are saved before and restored after every call so
+ * later runs start from a consistent state.
+ */
+static void
+test_initdrivers(void)
+{
+ int rc;
+ static struct spdk_mempool *orig_mbuf_mp;
+ static struct spdk_mempool *orig_session_mp;
+
+ /* No drivers available, not an error though */
+ MOCK_SET(rte_eal_get_configuration, g_test_config);
+ MOCK_SET(rte_cryptodev_count, 0);
+ rc = vbdev_crypto_init_crypto_drivers();
+ CU_ASSERT(rc == 0);
+
+ /* Test failure of DPDK dev init. */
+ MOCK_SET(rte_cryptodev_count, 2);
+ MOCK_SET(rte_vdev_init, -1);
+ rc = vbdev_crypto_init_crypto_drivers();
+ CU_ASSERT(rc == -EINVAL);
+ MOCK_SET(rte_vdev_init, 0);
+
+ /* Can't create session pool. */
+ MOCK_SET(spdk_mempool_create, NULL);
+ orig_mbuf_mp = g_mbuf_mp;
+ orig_session_mp = g_session_mp;
+ rc = vbdev_crypto_init_crypto_drivers();
+ g_mbuf_mp = orig_mbuf_mp;
+ g_session_mp = orig_session_mp;
+ CU_ASSERT(rc == -ENOMEM);
+ MOCK_CLEAR(spdk_mempool_create);
+
+ /* Can't create op pool. These tests will alloc and free our g_mbuf_mp
+ * so save that off here and restore it after each test is over.
+ */
+ orig_mbuf_mp = g_mbuf_mp;
+ orig_session_mp = g_session_mp;
+ MOCK_SET(rte_crypto_op_pool_create, NULL);
+ rc = vbdev_crypto_init_crypto_drivers();
+ g_mbuf_mp = orig_mbuf_mp;
+ g_session_mp = orig_session_mp;
+ CU_ASSERT(rc == -ENOMEM);
+ MOCK_SET(rte_crypto_op_pool_create, (struct rte_mempool *)1);
+
+ /* Check resources are sufficient failure. */
+ orig_mbuf_mp = g_mbuf_mp;
+ orig_session_mp = g_session_mp;
+ rc = vbdev_crypto_init_crypto_drivers();
+ g_mbuf_mp = orig_mbuf_mp;
+ g_session_mp = orig_session_mp;
+ CU_ASSERT(rc == -EINVAL);
+
+ /* Test crypto dev configure failure. */
+ MOCK_SET(rte_cryptodev_device_count_by_driver, 2);
+ MOCK_SET(rte_cryptodev_info_get, 1);
+ MOCK_SET(rte_cryptodev_configure, -1);
+ orig_mbuf_mp = g_mbuf_mp;
+ orig_session_mp = g_session_mp;
+ rc = vbdev_crypto_init_crypto_drivers();
+ g_mbuf_mp = orig_mbuf_mp;
+ g_session_mp = orig_session_mp;
+ MOCK_SET(rte_cryptodev_configure, 0);
+ CU_ASSERT(rc == -EINVAL);
+
+ /* Test failure of qp setup. */
+ MOCK_SET(rte_cryptodev_queue_pair_setup, -1);
+ orig_mbuf_mp = g_mbuf_mp;
+ orig_session_mp = g_session_mp;
+ rc = vbdev_crypto_init_crypto_drivers();
+ g_mbuf_mp = orig_mbuf_mp;
+ g_session_mp = orig_session_mp;
+ CU_ASSERT(rc == -EINVAL);
+ MOCK_SET(rte_cryptodev_queue_pair_setup, 0);
+
+ /* Test failure of dev start. */
+ MOCK_SET(rte_cryptodev_start, -1);
+ orig_mbuf_mp = g_mbuf_mp;
+ orig_session_mp = g_session_mp;
+ rc = vbdev_crypto_init_crypto_drivers();
+ g_mbuf_mp = orig_mbuf_mp;
+ g_session_mp = orig_session_mp;
+ CU_ASSERT(rc == -EINVAL);
+ MOCK_SET(rte_cryptodev_start, 0);
+
+ /* Test happy path. */
+ rc = vbdev_crypto_init_crypto_drivers();
+ CU_ASSERT(rc == 0);
+}
+
+/* Exercise _crypto_operation_complete() for each IO type: a prior FAILED
+ * status is preserved, READ completes as-is, WRITE success/failure follows
+ * the mocked spdk_bdev_writev_blocks return, and any other type (RESET here)
+ * is forced to FAILED. In every case the completion callback must fire
+ * (g_completion_called).
+ */
+static void
+test_crypto_op_complete(void)
+{
+ /* Make sure completion code respects failure. */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
+ g_completion_called = false;
+ _crypto_operation_complete(g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ CU_ASSERT(g_completion_called == true);
+
+ /* Test read completion. */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
+ g_completion_called = false;
+ _crypto_operation_complete(g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_completion_called == true);
+
+ /* Test write completion success. */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
+ g_completion_called = false;
+ MOCK_SET(spdk_bdev_writev_blocks, 0);
+ /* Code under test will free this, if not ASAN will complain. */
+ g_io_ctx->cry_iov.iov_base = spdk_dma_malloc(16, 0x10, NULL);
+ _crypto_operation_complete(g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_completion_called == true);
+
+ /* Test write completion failed. */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
+ g_completion_called = false;
+ MOCK_SET(spdk_bdev_writev_blocks, -1);
+ /* Code under test will free this, if not ASAN will complain. */
+ g_io_ctx->cry_iov.iov_base = spdk_dma_malloc(16, 0x10, NULL);
+ _crypto_operation_complete(g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ CU_ASSERT(g_completion_called == true);
+
+ /* Test bogus type for this completion. */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_RESET;
+ g_completion_called = false;
+ _crypto_operation_complete(g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ CU_ASSERT(g_completion_called == true);
+}
+
+/* vbdev_crypto_io_type_supported() must report WRITE_ZEROES as unsupported so
+ * the bdev layer sends real zero-filled writes that can be encrypted.
+ */
+static void
+test_supported_io(void)
+{
+ void *ctx = NULL;
+ bool rc = true;
+
+ /* Make sure we always report false to WZ, we need the bdev layer to
+ * send real 0's so we can encrypt/decrypt them.
+ */
+ rc = vbdev_crypto_io_type_supported(ctx, SPDK_BDEV_IO_TYPE_WRITE_ZEROES);
+ CU_ASSERT(rc == false);
+}
+
+/* Register the "crypto" CUnit suite (test_setup/test_cleanup as suite
+ * init/teardown), run all tests in verbose mode, and return the number of
+ * failures as the process exit code (0 == all passed).
+ */
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("crypto", test_setup, test_cleanup);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (CU_add_test(suite, "test_error_paths",
+ test_error_paths) == NULL ||
+ CU_add_test(suite, "test_simple_write",
+ test_simple_write) == NULL ||
+ CU_add_test(suite, "test_simple_read",
+ test_simple_read) == NULL ||
+ CU_add_test(suite, "test_large_rw",
+ test_large_rw) == NULL ||
+ CU_add_test(suite, "test_dev_full",
+ test_dev_full) == NULL ||
+ CU_add_test(suite, "test_crazy_rw",
+ test_crazy_rw) == NULL ||
+ CU_add_test(suite, "test_passthru",
+ test_passthru) == NULL ||
+ CU_add_test(suite, "test_initdrivers",
+ test_initdrivers) == NULL ||
+ CU_add_test(suite, "test_crypto_op_complete",
+ test_crypto_op_complete) == NULL ||
+ CU_add_test(suite, "test_supported_io",
+ test_supported_io) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/bdev/crypto.c/rte_crypto.h b/src/spdk/test/unit/lib/bdev/crypto.c/rte_crypto.h
new file mode 100644
index 00000000..a53a71df
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/crypto.c/rte_crypto.h
@@ -0,0 +1,95 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * Copyright(c) 2016 6WIND S.A.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_CRYPTO_H_
+#define _RTE_CRYPTO_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* In order to mock some DPDK functions, we place headers here with the same name as the DPDK headers
+ * so these definitions will be picked up. Only what's mocked is included.
+ */
+
+#include "rte_mbuf.h"
+#include "rte_mempool.h"
+#include "rte_crypto_sym.h"
+
+/* Mock mirror of DPDK's crypto op type/status enums and struct rte_crypto_op.
+ * NOTE(review): appears copied from a DPDK release; confirm the layout
+ * matches the DPDK version the code under test is built against.
+ */
+enum rte_crypto_op_type {
+ RTE_CRYPTO_OP_TYPE_UNDEFINED,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+};
+
+enum rte_crypto_op_status {
+ RTE_CRYPTO_OP_STATUS_SUCCESS,
+ RTE_CRYPTO_OP_STATUS_NOT_PROCESSED,
+ RTE_CRYPTO_OP_STATUS_AUTH_FAILED,
+ RTE_CRYPTO_OP_STATUS_INVALID_SESSION,
+ RTE_CRYPTO_OP_STATUS_INVALID_ARGS,
+ RTE_CRYPTO_OP_STATUS_ERROR,
+};
+
+struct rte_crypto_op {
+ uint8_t type;
+ uint8_t status;
+ uint8_t sess_type;
+ uint8_t reserved[5];
+ struct rte_mempool *mempool;
+ rte_iova_t phys_addr;
+ __extension__
+ union {
+ /* zero-length trailing member: the sym op lives directly after the op */
+ struct rte_crypto_sym_op sym[0];
+ };
+};
+
+extern struct rte_mempool *
+rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
+ unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
+ int socket_id);
+
+static inline unsigned
+rte_crypto_op_bulk_alloc(struct rte_mempool *mempool,
+ enum rte_crypto_op_type type,
+ struct rte_crypto_op **ops, uint16_t nb_ops);
+
+static inline int
+rte_crypto_op_attach_sym_session(struct rte_crypto_op *op,
+ struct rte_cryptodev_sym_session *sess);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/spdk/test/unit/lib/bdev/crypto.c/rte_cryptodev.h b/src/spdk/test/unit/lib/bdev/crypto.c/rte_cryptodev.h
new file mode 100644
index 00000000..b941a20d
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/crypto.c/rte_cryptodev.h
@@ -0,0 +1,153 @@
+/*-
+ *
+ * Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
+ * Copyright 2014 6WIND S.A.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_CRYPTODEV_H_
+#define _RTE_CRYPTODEV_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* In order to mock some DPDK functions, we place headers here with the same name as the DPDK headers
+ * so these definitions will be picked up. Only what's mocked is included.
+ */
+
+/* Stand-in for rte_crypto_op_ctod_offset(): always returns the same static
+ * buffer, so tests never dereference real op private data.
+ * NOTE(review): `dummy` is a tentative definition in a header — fine while
+ * only one translation unit includes this mock, but would produce duplicate
+ * symbols under -fno-common if included more widely; consider `static`.
+ */
+uint8_t dummy[16];
+#define rte_crypto_op_ctod_offset(c, t, o) &dummy[0]
+
+#define RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER (1ULL << 9)
+
+struct rte_cryptodev_info {
+ const char *driver_name;
+ uint8_t driver_id;
+ struct rte_pci_device *pci_dev;
+ uint64_t feature_flags;
+ const struct rte_cryptodev_capabilities *capabilities;
+ unsigned max_nb_queue_pairs;
+ struct {
+ unsigned max_nb_sessions;
+ unsigned int max_nb_sessions_per_qp;
+ } sym;
+};
+
+enum rte_cryptodev_event_type {
+ RTE_CRYPTODEV_EVENT_UNKNOWN,
+ RTE_CRYPTODEV_EVENT_ERROR,
+ RTE_CRYPTODEV_EVENT_MAX
+};
+
+struct rte_cryptodev_qp_conf {
+ uint32_t nb_descriptors;
+};
+
+struct rte_cryptodev_stats {
+ uint64_t enqueued_count;
+ uint64_t dequeued_count;
+ uint64_t enqueue_err_count;
+ uint64_t dequeue_err_count;
+};
+
+#define RTE_CRYPTODEV_NAME_MAX_LEN (64)
+
+extern uint8_t
+rte_cryptodev_count(void);
+
+extern uint8_t
+rte_cryptodev_device_count_by_driver(uint8_t driver_id);
+
+extern int
+rte_cryptodev_socket_id(uint8_t dev_id);
+
+struct rte_cryptodev_config {
+ int socket_id;
+ uint16_t nb_queue_pairs;
+};
+
+extern int
+rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config);
+
+extern int
+rte_cryptodev_start(uint8_t dev_id);
+
+extern void
+rte_cryptodev_stop(uint8_t dev_id);
+
+extern int
+rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
+ const struct rte_cryptodev_qp_conf *qp_conf, int socket_id,
+ struct rte_mempool *session_pool);
+
+extern void
+rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info);
+
+static inline uint16_t
+rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
+ struct rte_crypto_op **ops, uint16_t nb_ops);
+
+static inline uint16_t
+rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
+ struct rte_crypto_op **ops, uint16_t nb_ops);
+
+/* Opaque session mocks: zero-length arrays so sizeof() is 0 and the structs
+ * act purely as typed handles, matching DPDK's opaque session layout.
+ */
+struct rte_cryptodev_sym_session {
+ __extension__ void *sess_private_data[0];
+};
+
+struct rte_cryptodev_asym_session {
+ __extension__ void *sess_private_data[0];
+};
+
+/* Forward declaration only; asym xforms are never constructed in these tests. */
+struct rte_crypto_asym_xform;
+
+struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_create(struct rte_mempool *mempool);
+
+int
+rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess);
+
+int
+rte_cryptodev_sym_session_init(uint8_t dev_id,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_crypto_sym_xform *xforms,
+ struct rte_mempool *mempool);
+
+int
+rte_cryptodev_sym_session_clear(uint8_t dev_id,
+ struct rte_cryptodev_sym_session *sess);
+
+unsigned int
+rte_cryptodev_sym_get_private_session_size(uint8_t dev_id);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/spdk/test/unit/lib/bdev/crypto.c/rte_mbuf.h b/src/spdk/test/unit/lib/bdev/crypto.c/rte_mbuf.h
new file mode 100644
index 00000000..4d69f482
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/crypto.c/rte_mbuf.h
@@ -0,0 +1,148 @@
+/*-
+ *
+ * Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
+ * Copyright 2014 6WIND S.A.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_MBUF_H_
+#define _RTE_MBUF_H_
+
+#include "rte_mempool.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* In order to mock some DPDK functions, we place headers here with the same name as the DPDK headers
+ * so these definitions will be picked up. Only what's mocked is included.
+ */
+
+/* Zero-length marker types used by DPDK to name cacheline boundaries inside
+ * struct rte_mbuf without occupying space.
+ */
+__extension__
+typedef void *MARKER[0];
+__extension__
+typedef uint8_t MARKER8[0];
+__extension__
+typedef uint64_t MARKER64[0];
+
+/* Mock copy of DPDK's struct rte_mbuf so the code under test can read/write
+ * members like userdata, pool, and next. NOTE(review): field order/layout
+ * appears copied verbatim from a DPDK release; confirm it matches the DPDK
+ * version used by the vbdev module, since offsets matter.
+ */
+struct rte_mbuf {
+ MARKER cacheline0;
+ void *buf_addr;
+ RTE_STD_C11
+ union {
+ rte_iova_t buf_iova;
+ rte_iova_t buf_physaddr;
+ } __rte_aligned(sizeof(rte_iova_t));
+ MARKER64 rearm_data;
+ uint16_t data_off;
+ RTE_STD_C11
+ union {
+ rte_atomic16_t refcnt_atomic;
+ uint16_t refcnt;
+ };
+ uint16_t nb_segs;
+ uint16_t port;
+ uint64_t ol_flags;
+ MARKER rx_descriptor_fields1;
+ RTE_STD_C11
+ union {
+ uint32_t packet_type;
+ struct {
+ uint32_t l2_type: 4;
+ uint32_t l3_type: 4;
+ uint32_t l4_type: 4;
+ uint32_t tun_type: 4;
+ RTE_STD_C11
+ union {
+ uint8_t inner_esp_next_proto;
+ __extension__
+ struct {
+ uint8_t inner_l2_type: 4;
+ uint8_t inner_l3_type: 4;
+ };
+ };
+ uint32_t inner_l4_type: 4;
+ };
+ };
+ uint32_t pkt_len;
+ uint16_t data_len;
+ uint16_t vlan_tci;
+ union {
+ uint32_t rss;
+ struct {
+ RTE_STD_C11
+ union {
+ struct {
+ uint16_t hash;
+ uint16_t id;
+ };
+ uint32_t lo;
+ };
+ uint32_t hi;
+ } fdir;
+ struct {
+ uint32_t lo;
+ uint32_t hi;
+ } sched;
+ uint32_t usr;
+ } hash;
+ uint16_t vlan_tci_outer;
+ uint16_t buf_len;
+ uint64_t timestamp;
+ MARKER cacheline1 __rte_cache_min_aligned;
+ RTE_STD_C11
+ union {
+ void *userdata;
+ uint64_t udata64;
+ };
+ struct rte_mempool *pool;
+ struct rte_mbuf *next;
+ RTE_STD_C11
+ union {
+ uint64_t tx_offload;
+ __extension__
+ struct {
+ uint64_t l2_len: 7;
+ uint64_t l3_len: 9;
+ uint64_t l4_len: 8;
+ uint64_t tso_segsz: 16;
+ uint64_t outer_l3_len: 9;
+ uint64_t outer_l2_len: 7;
+ };
+ };
+ uint16_t priv_size;
+ uint16_t timesync;
+ uint32_t seqn;
+
+} __rte_cache_aligned;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/spdk/test/unit/lib/bdev/crypto.c/rte_mempool.h b/src/spdk/test/unit/lib/bdev/crypto.c/rte_mempool.h
new file mode 100644
index 00000000..5750d30f
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/crypto.c/rte_mempool.h
@@ -0,0 +1,145 @@
+/*-
+ *
+ * Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
+ * Copyright 2014 6WIND S.A.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_MEMPOOL_H_
+#define _RTE_MEMPOOL_H_
+
+/**
+ * @file
+ * RTE Mempool.
+ *
+ * A memory pool is an allocator of fixed-size object. It is
+ * identified by its name, and uses a ring to store free objects. It
+ * provides some other optional services, like a per-core object
+ * cache, and an alignment helper to ensure that objects are padded
+ * to spread them equally on all RAM channels, ranks, and so on.
+ *
+ * Objects owned by a mempool should never be added in another
+ * mempool. When an object is freed using rte_mempool_put() or
+ * equivalent, the object data is not modified; the user can save some
+ * meta-data in the object data and retrieve them when allocating a
+ * new object.
+ *
+ * Note: the mempool implementation is not preemptible. An lcore must not be
+ * interrupted by another task that uses the same mempool (because it uses a
+ * ring which is not preemptible). Also, usual mempool functions like
+ * rte_mempool_get() or rte_mempool_put() are designed to be called from an EAL
+ * thread due to the internal per-lcore cache. Due to the lack of caching,
+ * rte_mempool_get() or rte_mempool_put() performance will suffer when called
+ * by non-EAL threads. Instead, non-EAL threads should call
+ * rte_mempool_generic_get() or rte_mempool_generic_put() with a user cache
+ * created with rte_mempool_cache_create().
+ */
+
+#include <rte_config.h>
+#include <rte_spinlock.h>
+#include <rte_debug.h>
+#include <rte_ring.h>
+#include <rte_memcpy.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* In order to mock some DPDK functions, we place headers here with the same name as the DPDK headers
+ * so these definitions will be picked up. Only what's mocked is included.
+ */
+
+/* Mock copy of DPDK's struct rte_mempool and its ops table so mempool
+ * pointers can be created and passed around in the tests.
+ * NOTE(review): appears copied from a DPDK release; only pointer identity is
+ * exercised here — confirm against the DPDK version in use if members are
+ * ever dereferenced.
+ */
+STAILQ_HEAD(rte_mempool_objhdr_list, rte_mempool_objhdr);
+STAILQ_HEAD(rte_mempool_memhdr_list, rte_mempool_memhdr);
+struct rte_mempool {
+ char name[RTE_MEMZONE_NAMESIZE];
+ RTE_STD_C11
+ union {
+ void *pool_data;
+ uint64_t pool_id;
+ };
+ void *pool_config;
+ const struct rte_memzone *mz;
+ unsigned int flags;
+ int socket_id;
+ uint32_t size;
+ uint32_t cache_size;
+ uint32_t elt_size;
+ uint32_t header_size;
+ uint32_t trailer_size;
+ unsigned private_data_size;
+ int32_t ops_index;
+ struct rte_mempool_cache *local_cache;
+ uint32_t populated_size;
+ struct rte_mempool_objhdr_list elt_list;
+ uint32_t nb_mem_chunks;
+ struct rte_mempool_memhdr_list mem_list;
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ struct rte_mempool_debug_stats stats[RTE_MAX_LCORE];
+#endif
+} __rte_cache_aligned;
+#define RTE_MEMPOOL_OPS_NAMESIZE 32
+/* Per-driver mempool operation callbacks (alloc/free/enqueue/dequeue/...). */
+typedef int (*rte_mempool_alloc_t)(struct rte_mempool *mp);
+typedef void (*rte_mempool_free_t)(struct rte_mempool *mp);
+typedef int (*rte_mempool_enqueue_t)(struct rte_mempool *mp,
+ void *const *obj_table, unsigned int n);
+typedef int (*rte_mempool_dequeue_t)(struct rte_mempool *mp,
+ void **obj_table, unsigned int n);
+typedef unsigned(*rte_mempool_get_count)(const struct rte_mempool *mp);
+typedef int (*rte_mempool_get_capabilities_t)(const struct rte_mempool *mp,
+ unsigned int *flags);
+typedef int (*rte_mempool_ops_register_memory_area_t)
+(const struct rte_mempool *mp, char *vaddr, rte_iova_t iova, size_t len);
+struct rte_mempool_ops {
+ char name[RTE_MEMPOOL_OPS_NAMESIZE];
+ rte_mempool_alloc_t alloc;
+ rte_mempool_free_t free;
+ rte_mempool_enqueue_t enqueue;
+ rte_mempool_dequeue_t dequeue;
+ rte_mempool_get_count get_count;
+ rte_mempool_get_capabilities_t get_capabilities;
+ rte_mempool_ops_register_memory_area_t register_memory_area;
+} __rte_cache_aligned;
+#define RTE_MEMPOOL_MAX_OPS_IDX 16
+struct rte_mempool_ops_table {
+ rte_spinlock_t sl;
+ uint32_t num_ops;
+ struct rte_mempool_ops ops[RTE_MEMPOOL_MAX_OPS_IDX];
+} __rte_cache_aligned;
+extern struct rte_mempool_ops_table rte_mempool_ops_table;
+void
+rte_mempool_free(struct rte_mempool *mp);
+static __rte_always_inline void
+rte_mempool_put_bulk(struct rte_mempool *mp, void *const *obj_table,
+ unsigned int n);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_MEMPOOL_H_ */