Diffstat (limited to 'src/spdk/test/unit/lib/bdev')
-rw-r--r--  src/spdk/test/unit/lib/bdev/Makefile  50
-rw-r--r--  src/spdk/test/unit/lib/bdev/bdev.c/.gitignore  1
-rw-r--r--  src/spdk/test/unit/lib/bdev/bdev.c/Makefile  39
-rw-r--r--  src/spdk/test/unit/lib/bdev/bdev.c/bdev_ut.c  1214
-rw-r--r--  src/spdk/test/unit/lib/bdev/bdev_raid.c/.gitignore  1
-rw-r--r--  src/spdk/test/unit/lib/bdev/bdev_raid.c/Makefile  40
-rw-r--r--  src/spdk/test/unit/lib/bdev/bdev_raid.c/bdev_raid_ut.c  2236
-rw-r--r--  src/spdk/test/unit/lib/bdev/crypto.c/.gitignore  1
-rw-r--r--  src/spdk/test/unit/lib/bdev/crypto.c/Makefile  41
-rw-r--r--  src/spdk/test/unit/lib/bdev/crypto.c/crypto_ut.c  908
-rw-r--r--  src/spdk/test/unit/lib/bdev/crypto.c/rte_crypto.h  95
-rw-r--r--  src/spdk/test/unit/lib/bdev/crypto.c/rte_cryptodev.h  153
-rw-r--r--  src/spdk/test/unit/lib/bdev/crypto.c/rte_mbuf.h  148
-rw-r--r--  src/spdk/test/unit/lib/bdev/crypto.c/rte_mempool.h  145
-rw-r--r--  src/spdk/test/unit/lib/bdev/gpt/Makefile  44
-rw-r--r--  src/spdk/test/unit/lib/bdev/gpt/gpt.c/.gitignore  1
-rw-r--r--  src/spdk/test/unit/lib/bdev/gpt/gpt.c/Makefile  40
-rw-r--r--  src/spdk/test/unit/lib/bdev/gpt/gpt.c/gpt_ut.c  297
-rw-r--r--  src/spdk/test/unit/lib/bdev/mt/Makefile  44
-rw-r--r--  src/spdk/test/unit/lib/bdev/mt/bdev.c/.gitignore  1
-rw-r--r--  src/spdk/test/unit/lib/bdev/mt/bdev.c/Makefile  41
-rw-r--r--  src/spdk/test/unit/lib/bdev/mt/bdev.c/bdev_ut.c  1360
-rw-r--r--  src/spdk/test/unit/lib/bdev/part.c/.gitignore  1
-rw-r--r--  src/spdk/test/unit/lib/bdev/part.c/Makefile  40
-rw-r--r--  src/spdk/test/unit/lib/bdev/part.c/part_ut.c  179
-rw-r--r--  src/spdk/test/unit/lib/bdev/pmem/.gitignore  1
-rw-r--r--  src/spdk/test/unit/lib/bdev/pmem/Makefile  40
-rw-r--r--  src/spdk/test/unit/lib/bdev/pmem/bdev_pmem_ut.c  783
-rw-r--r--  src/spdk/test/unit/lib/bdev/scsi_nvme.c/.gitignore  1
-rw-r--r--  src/spdk/test/unit/lib/bdev/scsi_nvme.c/Makefile  39
-rw-r--r--  src/spdk/test/unit/lib/bdev/scsi_nvme.c/scsi_nvme_ut.c  142
-rw-r--r--  src/spdk/test/unit/lib/bdev/vbdev_lvol.c/.gitignore  1
-rw-r--r--  src/spdk/test/unit/lib/bdev/vbdev_lvol.c/Makefile  40
-rw-r--r--  src/spdk/test/unit/lib/bdev/vbdev_lvol.c/vbdev_lvol_ut.c  1410
34 files changed, 9577 insertions, 0 deletions
diff --git a/src/spdk/test/unit/lib/bdev/Makefile b/src/spdk/test/unit/lib/bdev/Makefile
new file mode 100644
index 00000000..61efba78
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/Makefile
@@ -0,0 +1,50 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = bdev.c part.c scsi_nvme.c gpt vbdev_lvol.c mt bdev_raid.c
+
+ifeq ($(CONFIG_CRYPTO),y)
+DIRS-y += crypto.c
+endif
+
+DIRS-$(CONFIG_PMDK) += pmem
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/bdev/bdev.c/.gitignore b/src/spdk/test/unit/lib/bdev/bdev.c/.gitignore
new file mode 100644
index 00000000..a5a22d0d
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/bdev.c/.gitignore
@@ -0,0 +1 @@
+bdev_ut
diff --git a/src/spdk/test/unit/lib/bdev/bdev.c/Makefile b/src/spdk/test/unit/lib/bdev/bdev.c/Makefile
new file mode 100644
index 00000000..384fa27a
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/bdev.c/Makefile
@@ -0,0 +1,39 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) 2016 FUJITSU LIMITED, All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of the copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+
+TEST_FILE = bdev_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/bdev/bdev.c/bdev_ut.c b/src/spdk/test/unit/lib/bdev/bdev.c/bdev_ut.c
new file mode 100644
index 00000000..3c14f712
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/bdev.c/bdev_ut.c
@@ -0,0 +1,1214 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "common/lib/test_env.c"
+#include "unit/lib/json_mock.c"
+
+#include "spdk/config.h"
+/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
+#undef SPDK_CONFIG_VTUNE
+
+#include "bdev/bdev.c"
+
+DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *, (struct spdk_conf *cp,
+ const char *name), NULL);
+DEFINE_STUB(spdk_conf_section_get_nmval, char *,
+ (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL);
+DEFINE_STUB(spdk_conf_section_get_intval, int, (struct spdk_conf_section *sp, const char *key), -1);
+
+struct spdk_trace_histories *g_trace_histories;
+DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
+DEFINE_STUB_V(spdk_trace_register_owner, (uint8_t type, char id_prefix));
+DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
+DEFINE_STUB_V(spdk_trace_register_description, (const char *name, const char *short_name,
+ uint16_t tpoint_id, uint8_t owner_type,
+ uint8_t object_type, uint8_t new_object,
+ uint8_t arg1_is_ptr, const char *arg1_name));
+DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
+ uint32_t size, uint64_t object_id, uint64_t arg1));
+
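+/* Deliver thread messages inline so the entire test runs single-threaded and deterministically. */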
+static void
+_bdev_send_msg(spdk_thread_fn fn, void *ctx, void *thread_ctx)
+{
+ fn(ctx);
+}
+
+void
+spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io,
+ int *sc, int *sk, int *asc, int *ascq)
+{
+}
+
+static int
+null_init(void)
+{
+ return 0;
+}
+
+static int
+null_clean(void)
+{
+ return 0;
+}
+
+static int
+stub_destruct(void *ctx)
+{
+ return 0;
+}
+
+struct ut_expected_io {
+ uint8_t type;
+ uint64_t offset;
+ uint64_t length;
+ int iovcnt;
+ struct iovec iov[BDEV_IO_NUM_CHILD_IOV];
+ TAILQ_ENTRY(ut_expected_io) link;
+};
+
+struct bdev_ut_channel {
+ TAILQ_HEAD(, spdk_bdev_io) outstanding_io;
+ uint32_t outstanding_io_count;
+ TAILQ_HEAD(, ut_expected_io) expected_io;
+};
+
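+/*
+ * Tests drive stub_submit_request() below through this expected-I/O queue.
+ * An illustrative sketch of the pattern (desc, io_ch and buf stand in for
+ * test-local variables):
+ *
+ *   expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, 8, 1);
+ *   ut_expected_io_set_iov(expected_io, 0, buf, 8 * 512);
+ *   TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+ *   spdk_bdev_read_blocks(desc, io_ch, buf, 0, 8, io_done, NULL);
+ *   stub_complete_io(1);
+ */
+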
+static bool g_io_done;
+static enum spdk_bdev_io_status g_io_status;
+static uint32_t g_bdev_ut_io_device;
+static struct bdev_ut_channel *g_bdev_ut_channel;
+
+static struct ut_expected_io *
+ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt)
+{
+ struct ut_expected_io *expected_io;
+
+ expected_io = calloc(1, sizeof(*expected_io));
+ SPDK_CU_ASSERT_FATAL(expected_io != NULL);
+
+ expected_io->type = type;
+ expected_io->offset = offset;
+ expected_io->length = length;
+ expected_io->iovcnt = iovcnt;
+
+ return expected_io;
+}
+
+static void
+ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len)
+{
+ expected_io->iov[pos].iov_base = base;
+ expected_io->iov[pos].iov_len = len;
+}
+
+static void
+stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
+{
+ struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch);
+ struct ut_expected_io *expected_io;
+ struct iovec *iov, *expected_iov;
+ int i;
+
+ TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
+ ch->outstanding_io_count++;
+
+ expected_io = TAILQ_FIRST(&ch->expected_io);
+ if (expected_io == NULL) {
+ return;
+ }
+ TAILQ_REMOVE(&ch->expected_io, expected_io, link);
+
+ if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) {
+ CU_ASSERT(bdev_io->type == expected_io->type);
+ }
+
+ if (expected_io->length == 0) {
+ free(expected_io);
+ return;
+ }
+
+ CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks);
+ CU_ASSERT(expected_io->length == bdev_io->u.bdev.num_blocks);
+
+ if (expected_io->iovcnt == 0) {
+ free(expected_io);
+ /* UNMAP, WRITE_ZEROES and FLUSH don't have iovs, so we can just return now. */
+ return;
+ }
+
+ CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt);
+ for (i = 0; i < expected_io->iovcnt; i++) {
+ iov = &bdev_io->u.bdev.iovs[i];
+ expected_iov = &expected_io->iov[i];
+ CU_ASSERT(iov->iov_len == expected_iov->iov_len);
+ CU_ASSERT(iov->iov_base == expected_iov->iov_base);
+ }
+
+ free(expected_io);
+}
+
+static uint32_t
+stub_complete_io(uint32_t num_to_complete)
+{
+ struct bdev_ut_channel *ch = g_bdev_ut_channel;
+ struct spdk_bdev_io *bdev_io;
+ uint32_t num_completed = 0;
+
+ while (num_completed < num_to_complete) {
+ if (TAILQ_EMPTY(&ch->outstanding_io)) {
+ break;
+ }
+ bdev_io = TAILQ_FIRST(&ch->outstanding_io);
+ TAILQ_REMOVE(&ch->outstanding_io, bdev_io, module_link);
+ ch->outstanding_io_count--;
+ spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
+ num_completed++;
+ }
+
+ return num_completed;
+}
+
+static struct spdk_io_channel *
+bdev_ut_get_io_channel(void *ctx)
+{
+ return spdk_get_io_channel(&g_bdev_ut_io_device);
+}
+
+static bool
+stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type)
+{
+ return true;
+}
+
+static struct spdk_bdev_fn_table fn_table = {
+ .destruct = stub_destruct,
+ .submit_request = stub_submit_request,
+ .get_io_channel = bdev_ut_get_io_channel,
+ .io_type_supported = stub_io_type_supported,
+};
+
+static int
+bdev_ut_create_ch(void *io_device, void *ctx_buf)
+{
+ struct bdev_ut_channel *ch = ctx_buf;
+
+ CU_ASSERT(g_bdev_ut_channel == NULL);
+ g_bdev_ut_channel = ch;
+
+ TAILQ_INIT(&ch->outstanding_io);
+ ch->outstanding_io_count = 0;
+ TAILQ_INIT(&ch->expected_io);
+ return 0;
+}
+
+static void
+bdev_ut_destroy_ch(void *io_device, void *ctx_buf)
+{
+ CU_ASSERT(g_bdev_ut_channel != NULL);
+ g_bdev_ut_channel = NULL;
+}
+
+static int
+bdev_ut_module_init(void)
+{
+ spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch,
+ sizeof(struct bdev_ut_channel), NULL);
+ return 0;
+}
+
+static void
+bdev_ut_module_fini(void)
+{
+ spdk_io_device_unregister(&g_bdev_ut_io_device, NULL);
+}
+
+struct spdk_bdev_module bdev_ut_if = {
+ .name = "bdev_ut",
+ .module_init = bdev_ut_module_init,
+ .module_fini = bdev_ut_module_fini,
+};
+
+static void vbdev_ut_examine(struct spdk_bdev *bdev);
+
+static int
+vbdev_ut_module_init(void)
+{
+ return 0;
+}
+
+static void
+vbdev_ut_module_fini(void)
+{
+}
+
+struct spdk_bdev_module vbdev_ut_if = {
+ .name = "vbdev_ut",
+ .module_init = vbdev_ut_module_init,
+ .module_fini = vbdev_ut_module_fini,
+ .examine_config = vbdev_ut_examine,
+};
+
+SPDK_BDEV_MODULE_REGISTER(&bdev_ut_if)
+SPDK_BDEV_MODULE_REGISTER(&vbdev_ut_if)
+
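+/* Examine completes immediately, so registering bdevs never blocks in these tests. */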
+static void
+vbdev_ut_examine(struct spdk_bdev *bdev)
+{
+ spdk_bdev_module_examine_done(&vbdev_ut_if);
+}
+
+static struct spdk_bdev *
+allocate_bdev(char *name)
+{
+ struct spdk_bdev *bdev;
+ int rc;
+
+ bdev = calloc(1, sizeof(*bdev));
+ SPDK_CU_ASSERT_FATAL(bdev != NULL);
+
+ bdev->name = name;
+ bdev->fn_table = &fn_table;
+ bdev->module = &bdev_ut_if;
+ bdev->blockcnt = 1024;
+ bdev->blocklen = 512;
+
+ rc = spdk_bdev_register(bdev);
+ CU_ASSERT(rc == 0);
+
+ return bdev;
+}
+
+static struct spdk_bdev *
+allocate_vbdev(char *name, struct spdk_bdev *base1, struct spdk_bdev *base2)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev *array[2];
+ int rc;
+
+ bdev = calloc(1, sizeof(*bdev));
+ SPDK_CU_ASSERT_FATAL(bdev != NULL);
+
+ bdev->name = name;
+ bdev->fn_table = &fn_table;
+ bdev->module = &vbdev_ut_if;
+
+ /* vbdev must have at least one base bdev */
+ CU_ASSERT(base1 != NULL);
+
+ array[0] = base1;
+ array[1] = base2;
+
+ rc = spdk_vbdev_register(bdev, array, base2 == NULL ? 1 : 2);
+ CU_ASSERT(rc == 0);
+
+ return bdev;
+}
+
+static void
+free_bdev(struct spdk_bdev *bdev)
+{
+ spdk_bdev_unregister(bdev, NULL, NULL);
+ memset(bdev, 0xFF, sizeof(*bdev));
+ free(bdev);
+}
+
+static void
+free_vbdev(struct spdk_bdev *bdev)
+{
+ spdk_bdev_unregister(bdev, NULL, NULL);
+ memset(bdev, 0xFF, sizeof(*bdev));
+ free(bdev);
+}
+
+static void
+get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc)
+{
+ const char *bdev_name;
+
+ CU_ASSERT(bdev != NULL);
+ CU_ASSERT(rc == 0);
+ bdev_name = spdk_bdev_get_name(bdev);
+ CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0");
+
+ free(stat);
+ free_bdev(bdev);
+}
+
+static void
+get_device_stat_test(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_io_stat *stat;
+
+ bdev = allocate_bdev("bdev0");
+ stat = calloc(1, sizeof(struct spdk_bdev_io_stat));
+ if (stat == NULL) {
+ free_bdev(bdev);
+ return;
+ }
+ spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, NULL);
+}
+
+static void
+open_write_test(void)
+{
+ struct spdk_bdev *bdev[9];
+ struct spdk_bdev_desc *desc[9] = {};
+ int rc;
+
+ /*
+ * Create a tree of bdevs to test various open w/ write cases.
+ *
+ * bdev0 through bdev3 are physical block devices, such as NVMe
+ * namespaces or Ceph block devices.
+ *
+ * bdev4 is a virtual bdev with multiple base bdevs. This models
+ * caching or RAID use cases.
+ *
+ * bdev5 through bdev7 are virtual bdevs that all share bdev2 as a
+ * base bdev. This models partitioning or logical volume use cases.
+ *
+ * bdev7 is a virtual bdev with multiple base bdevs. One of its base
+ * bdevs (bdev2) is shared with the other virtual bdevs bdev5 and
+ * bdev6. This models caching, RAID, partitioning or logical volume
+ * use cases.
+ *
+ * bdev8 is a virtual bdev with multiple base bdevs, but these
+ * base bdevs are themselves virtual bdevs.
+ *
+ *                bdev8
+ *                  |
+ *            +----------+
+ *            |          |
+ *          bdev4      bdev5   bdev6   bdev7
+ *            |          |       |       |
+ *        +---+---+      |       |    +--+--+
+ *        |       |       \      |   /       \
+ *      bdev0   bdev1      \     |  /       bdev3
+ *                          \    | /
+ *                           bdev2
+ */
+
+ bdev[0] = allocate_bdev("bdev0");
+ rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if);
+ CU_ASSERT(rc == 0);
+
+ bdev[1] = allocate_bdev("bdev1");
+ rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
+ CU_ASSERT(rc == 0);
+
+ bdev[2] = allocate_bdev("bdev2");
+ rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if);
+ CU_ASSERT(rc == 0);
+
+ bdev[3] = allocate_bdev("bdev3");
+ rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
+ CU_ASSERT(rc == 0);
+
+ bdev[4] = allocate_vbdev("bdev4", bdev[0], bdev[1]);
+ rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if);
+ CU_ASSERT(rc == 0);
+
+ bdev[5] = allocate_vbdev("bdev5", bdev[2], NULL);
+ rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
+ CU_ASSERT(rc == 0);
+
+ bdev[6] = allocate_vbdev("bdev6", bdev[2], NULL);
+
+ bdev[7] = allocate_vbdev("bdev7", bdev[2], bdev[3]);
+
+ bdev[8] = allocate_vbdev("bdev8", bdev[4], bdev[5]);
+
+ /* Open bdev0 read-only. This should succeed. */
+ rc = spdk_bdev_open(bdev[0], false, NULL, NULL, &desc[0]);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
+ spdk_bdev_close(desc[0]);
+
+ /*
+ * Open bdev1 read/write. This should fail since bdev1 has been claimed
+ * by a vbdev module.
+ */
+ rc = spdk_bdev_open(bdev[1], true, NULL, NULL, &desc[1]);
+ CU_ASSERT(rc == -EPERM);
+
+ /*
+ * Open bdev4 read/write. This should fail since bdev4 itself has been
+ * claimed by a vbdev module.
+ */
+ rc = spdk_bdev_open(bdev[4], true, NULL, NULL, &desc[4]);
+ CU_ASSERT(rc == -EPERM);
+
+ /* Open bdev4 read-only. This should succeed. */
+ rc = spdk_bdev_open(bdev[4], false, NULL, NULL, &desc[4]);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(desc[4] != NULL);
+ spdk_bdev_close(desc[4]);
+
+ /*
+ * Open bdev8 read/write. This should succeed since it is a leaf
+ * bdev: nothing is built on top of it and no module has claimed it.
+ */
+ rc = spdk_bdev_open(bdev[8], true, NULL, NULL, &desc[8]);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(desc[8] != NULL);
+ spdk_bdev_close(desc[8]);
+
+ /*
+ * Open bdev5 read/write. This should fail since bdev5 has been claimed
+ * by a vbdev module.
+ */
+ rc = spdk_bdev_open(bdev[5], true, NULL, NULL, &desc[5]);
+ CU_ASSERT(rc == -EPERM);
+
+ /* Open bdev5 read-only. This should succeed. */
+ rc = spdk_bdev_open(bdev[5], false, NULL, NULL, &desc[5]);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(desc[5] != NULL);
+ spdk_bdev_close(desc[5]);
+
+ free_vbdev(bdev[8]);
+
+ free_vbdev(bdev[5]);
+ free_vbdev(bdev[6]);
+ free_vbdev(bdev[7]);
+
+ free_vbdev(bdev[4]);
+
+ free_bdev(bdev[0]);
+ free_bdev(bdev[1]);
+ free_bdev(bdev[2]);
+ free_bdev(bdev[3]);
+}
+
+static void
+bytes_to_blocks_test(void)
+{
+ struct spdk_bdev bdev;
+ uint64_t offset_blocks, num_blocks;
+
+ memset(&bdev, 0, sizeof(bdev));
+
+ bdev.blocklen = 512;
+
+ /* All parameters valid */
+ offset_blocks = 0;
+ num_blocks = 0;
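+ /* blocklen is 512, so byte offset 512 -> block 1 and byte length 1024 -> 2 blocks */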
+ CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0);
+ CU_ASSERT(offset_blocks == 1);
+ CU_ASSERT(num_blocks == 2);
+
+ /* Offset not a block multiple */
+ CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0);
+
+ /* Length not a block multiple */
+ CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0);
+}
+
+static void
+num_blocks_test(void)
+{
+ struct spdk_bdev bdev;
+ struct spdk_bdev_desc *desc = NULL;
+ int rc;
+
+ memset(&bdev, 0, sizeof(bdev));
+ bdev.name = "num_blocks";
+ bdev.fn_table = &fn_table;
+ bdev.module = &bdev_ut_if;
+ spdk_bdev_register(&bdev);
+ spdk_bdev_notify_blockcnt_change(&bdev, 50);
+
+ /* Growing block number */
+ CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 70) == 0);
+ /* Shrinking block number */
+ CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 30) == 0);
+
+ /* Repeat with the bdev open: shrinking the block count is now rejected */
+ rc = spdk_bdev_open(&bdev, false, NULL, NULL, &desc);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(desc != NULL);
+
+ /* Growing block number */
+ CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 80) == 0);
+ /* Shrinking block number */
+ CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 20) != 0);
+
+ spdk_bdev_close(desc);
+ spdk_bdev_unregister(&bdev, NULL, NULL);
+}
+
+static void
+io_valid_test(void)
+{
+ struct spdk_bdev bdev;
+
+ memset(&bdev, 0, sizeof(bdev));
+
+ bdev.blocklen = 512;
+ spdk_bdev_notify_blockcnt_change(&bdev, 100);
+
+ /* All parameters valid */
+ CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 1, 2) == true);
+
+ /* Last valid block */
+ CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 99, 1) == true);
+
+ /* Offset past end of bdev */
+ CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 100, 1) == false);
+
+ /* Offset + length past end of bdev */
+ CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 99, 2) == false);
+
+ /* Offset near end of uint64_t range (2^64 - 1) */
+ CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false);
+}
+
+static void
+alias_add_del_test(void)
+{
+ struct spdk_bdev *bdev[3];
+ int rc;
+
+ /* Creating and registering bdevs */
+ bdev[0] = allocate_bdev("bdev0");
+ SPDK_CU_ASSERT_FATAL(bdev[0] != 0);
+
+ bdev[1] = allocate_bdev("bdev1");
+ SPDK_CU_ASSERT_FATAL(bdev[1] != 0);
+
+ bdev[2] = allocate_bdev("bdev2");
+ SPDK_CU_ASSERT_FATAL(bdev[2] != 0);
+
+ /*
+ * Try adding an alias identical to the bdev name. A name cannot also
+ * appear on the aliases list, so this should fail.
+ */
+ rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name);
+ CU_ASSERT(rc == -EEXIST);
+
+ /* Try adding an empty (NULL) alias; this should fail */
+ rc = spdk_bdev_alias_add(bdev[0], NULL);
+ CU_ASSERT(rc == -EINVAL);
+
+ /* Try adding the same alias to two different registered bdevs */
+
+ /* Alias is used for the first time, so this should pass */
+ rc = spdk_bdev_alias_add(bdev[0], "proper alias 0");
+ CU_ASSERT(rc == 0);
+
+ /* Alias was already added to another bdev, so this should fail */
+ rc = spdk_bdev_alias_add(bdev[1], "proper alias 0");
+ CU_ASSERT(rc == -EEXIST);
+
+ /* Alias is used for the first time, so this should pass */
+ rc = spdk_bdev_alias_add(bdev[1], "proper alias 1");
+ CU_ASSERT(rc == 0);
+
+ /* Try removing aliases from registered bdevs */
+
+ /* Alias is not on the bdev's alias list, so this should fail */
+ rc = spdk_bdev_alias_del(bdev[0], "not existing");
+ CU_ASSERT(rc == -ENOENT);
+
+ /* Alias is present on the bdev's alias list, so this should pass */
+ rc = spdk_bdev_alias_del(bdev[0], "proper alias 0");
+ CU_ASSERT(rc == 0);
+
+ /* Alias is present on the bdev's alias list, so this should pass */
+ rc = spdk_bdev_alias_del(bdev[1], "proper alias 1");
+ CU_ASSERT(rc == 0);
+
+ /* Try removing the name instead of an alias; this should fail since a name cannot be changed or removed */
+ rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name);
+ CU_ASSERT(rc != 0);
+
+ /* Delete all aliases from an empty alias list */
+ spdk_bdev_alias_del_all(bdev[2]);
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases));
+
+ /* Delete all aliases from a non-empty alias list */
+ rc = spdk_bdev_alias_add(bdev[2], "alias0");
+ CU_ASSERT(rc == 0);
+ rc = spdk_bdev_alias_add(bdev[2], "alias1");
+ CU_ASSERT(rc == 0);
+ spdk_bdev_alias_del_all(bdev[2]);
+ CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases));
+
+ /* Unregister and free bdevs */
+ spdk_bdev_unregister(bdev[0], NULL, NULL);
+ spdk_bdev_unregister(bdev[1], NULL, NULL);
+ spdk_bdev_unregister(bdev[2], NULL, NULL);
+
+ free(bdev[0]);
+ free(bdev[1]);
+ free(bdev[2]);
+}
+
+static void
+io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
+{
+ g_io_done = true;
+ g_io_status = bdev_io->internal.status;
+ spdk_bdev_free_io(bdev_io);
+}
+
+static void
+bdev_init_cb(void *arg, int rc)
+{
+ CU_ASSERT(rc == 0);
+}
+
+static void
+bdev_fini_cb(void *arg)
+{
+}
+
+struct bdev_ut_io_wait_entry {
+ struct spdk_bdev_io_wait_entry entry;
+ struct spdk_io_channel *io_ch;
+ struct spdk_bdev_desc *desc;
+ bool submitted;
+};
+
+static void
+io_wait_cb(void *arg)
+{
+ struct bdev_ut_io_wait_entry *entry = arg;
+ int rc;
+
+ rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ entry->submitted = true;
+}
+
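+/*
+ * With bdev_io_pool_size set to 4 below, a fifth submission fails with
+ * -ENOMEM; the test then uses spdk_bdev_queue_io_wait() to resubmit from
+ * io_wait_cb() once an outstanding I/O completes.
+ */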
+static void
+bdev_io_wait_test(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_desc *desc = NULL;
+ struct spdk_io_channel *io_ch;
+ struct spdk_bdev_opts bdev_opts = {
+ .bdev_io_pool_size = 4,
+ .bdev_io_cache_size = 2,
+ };
+ struct bdev_ut_io_wait_entry io_wait_entry;
+ struct bdev_ut_io_wait_entry io_wait_entry2;
+ int rc;
+
+ rc = spdk_bdev_set_opts(&bdev_opts);
+ CU_ASSERT(rc == 0);
+ spdk_bdev_initialize(bdev_init_cb, NULL);
+
+ bdev = allocate_bdev("bdev0");
+
+ rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(desc != NULL);
+ io_ch = spdk_bdev_get_io_channel(desc);
+ CU_ASSERT(io_ch != NULL);
+
+ rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
+
+ rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
+ CU_ASSERT(rc == -ENOMEM);
+
+ io_wait_entry.entry.bdev = bdev;
+ io_wait_entry.entry.cb_fn = io_wait_cb;
+ io_wait_entry.entry.cb_arg = &io_wait_entry;
+ io_wait_entry.io_ch = io_ch;
+ io_wait_entry.desc = desc;
+ io_wait_entry.submitted = false;
+ /* Cannot use the same io_wait_entry for two different calls. */
+ memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry));
+ io_wait_entry2.entry.cb_arg = &io_wait_entry2;
+
+ /* Queue two I/O waits. */
+ rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(io_wait_entry.submitted == false);
+ rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(io_wait_entry2.submitted == false);
+
+ stub_complete_io(1);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
+ CU_ASSERT(io_wait_entry.submitted == true);
+ CU_ASSERT(io_wait_entry2.submitted == false);
+
+ stub_complete_io(1);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
+ CU_ASSERT(io_wait_entry2.submitted == true);
+
+ stub_complete_io(4);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
+
+ spdk_put_io_channel(io_ch);
+ spdk_bdev_close(desc);
+ free_bdev(bdev);
+ spdk_bdev_finish(bdev_fini_cb, NULL);
+}
+
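+/*
+ * The split rule these cases verify: an I/O must be split only when its first
+ * and last blocks fall into different optimal_io_boundary-sized windows, i.e.
+ * offset / boundary != (offset + num_blocks - 1) / boundary.
+ */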
+static void
+bdev_io_spans_boundary_test(void)
+{
+ struct spdk_bdev bdev;
+ struct spdk_bdev_io bdev_io;
+
+ memset(&bdev, 0, sizeof(bdev));
+
+ bdev.optimal_io_boundary = 0;
+ bdev_io.bdev = &bdev;
+
+ /* bdev has no optimal_io_boundary set - so this should return false. */
+ CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == false);
+
+ bdev.optimal_io_boundary = 32;
+ bdev_io.type = SPDK_BDEV_IO_TYPE_RESET;
+
+ /* RESETs are not based on LBAs - so this should return false. */
+ CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == false);
+
+ bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
+ bdev_io.u.bdev.offset_blocks = 0;
+ bdev_io.u.bdev.num_blocks = 32;
+
+ /* This I/O runs right up to, but does not cross, the boundary - so this should return false. */
+ CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == false);
+
+ bdev_io.u.bdev.num_blocks = 33;
+
+ /* This I/O spans a boundary. */
+ CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == true);
+}
+
+static void
+bdev_io_split(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_desc *desc = NULL;
+ struct spdk_io_channel *io_ch;
+ struct spdk_bdev_opts bdev_opts = {
+ .bdev_io_pool_size = 512,
+ .bdev_io_cache_size = 64,
+ };
+ struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
+ struct ut_expected_io *expected_io;
+ uint64_t i;
+ int rc;
+
+ rc = spdk_bdev_set_opts(&bdev_opts);
+ CU_ASSERT(rc == 0);
+ spdk_bdev_initialize(bdev_init_cb, NULL);
+
+ bdev = allocate_bdev("bdev0");
+
+ rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(desc != NULL);
+ io_ch = spdk_bdev_get_io_channel(desc);
+ CU_ASSERT(io_ch != NULL);
+
+ bdev->optimal_io_boundary = 16;
+ bdev->split_on_optimal_io_boundary = false;
+
+ g_io_done = false;
+
+ /* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
+ ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
+
+ bdev->split_on_optimal_io_boundary = true;
+
+ /* Now test that a single-vector command is split correctly.
+ * Offset 14, length 8, payload 0xF000
+ * Child - Offset 14, length 2, payload 0xF000
+ * Child - Offset 16, length 6, payload 0xF000 + 2 * 512
+ *
+ * Set up the expected values before calling spdk_bdev_read_blocks
+ */
+ g_io_done = false;
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
+ ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
+ ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ /* spdk_bdev_read_blocks will submit the first child immediately. */
+ rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
+ stub_complete_io(2);
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
+
+ /* Now set up a more complex, multi-vector command that needs to be split,
+ * including splitting iovecs.
+ */
+ iov[0].iov_base = (void *)0x10000;
+ iov[0].iov_len = 512;
+ iov[1].iov_base = (void *)0x20000;
+ iov[1].iov_len = 20 * 512;
+ iov[2].iov_base = (void *)0x30000;
+ iov[2].iov_len = 11 * 512;
+
+ g_io_done = false;
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
+ ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
+ ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
+ ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
+ ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
+ ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
+ stub_complete_io(3);
+ CU_ASSERT(g_io_done == true);
+
+ /* Test multi vector command that needs to be split by strip and then needs to be
+ * split further due to the capacity of child iovs.
+ */
+ for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) {
+ iov[i].iov_base = (void *)((i + 1) * 0x10000);
+ iov[i].iov_len = 512;
+ }
+
+ bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
+ g_io_done = false;
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV,
+ BDEV_IO_NUM_CHILD_IOV);
+ for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
+ ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512);
+ }
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
+ BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV);
+ for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
+ ut_expected_io_set_iov(expected_io, i,
+ (void *)((i + 1 + BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512);
+ }
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
+ BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == false);
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
+
+ /* Test multi vector command that needs to be split by strip and then needs to be
+ * split further due to the capacity of child iovs, but fails to split. The split
+ * fails because one of the iovecs has a length that is not a multiple of the block size.
+ */
+ for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
+ iov[i].iov_base = (void *)((i + 1) * 0x10000);
+ iov[i].iov_len = 512;
+ }
+ iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000);
+ iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;
+
+ bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
+ g_io_done = false;
+ g_io_status = 0;
+
+ rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
+ BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
+
+ /* Test a WRITE_ZEROES that would span an I/O boundary. WRITE_ZEROES should not be
+ * split, so test that.
+ */
+ bdev->optimal_io_boundary = 15;
+ g_io_done = false;
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == true);
+
+ /* Test an UNMAP. This should also not be split. */
+ bdev->optimal_io_boundary = 16;
+ g_io_done = false;
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == true);
+
+ /* Test a FLUSH. This should also not be split. */
+ bdev->optimal_io_boundary = 16;
+ g_io_done = false;
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == true);
+
+ CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));
+
+ spdk_put_io_channel(io_ch);
+ spdk_bdev_close(desc);
+ free_bdev(bdev);
+ spdk_bdev_finish(bdev_fini_cb, NULL);
+}
+
+static void
+bdev_io_split_with_io_wait(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_desc *desc;
+ struct spdk_io_channel *io_ch;
+ struct spdk_bdev_channel *channel;
+ struct spdk_bdev_mgmt_channel *mgmt_ch;
+ struct spdk_bdev_opts bdev_opts = {
+ .bdev_io_pool_size = 2,
+ .bdev_io_cache_size = 1,
+ };
+ struct iovec iov[3];
+ struct ut_expected_io *expected_io;
+ int rc;
+
+ rc = spdk_bdev_set_opts(&bdev_opts);
+ CU_ASSERT(rc == 0);
+ spdk_bdev_initialize(bdev_init_cb, NULL);
+
+ bdev = allocate_bdev("bdev0");
+
+ rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(desc != NULL);
+ io_ch = spdk_bdev_get_io_channel(desc);
+ CU_ASSERT(io_ch != NULL);
+ channel = spdk_io_channel_get_ctx(io_ch);
+ mgmt_ch = channel->shared_resource->mgmt_ch;
+
+ bdev->optimal_io_boundary = 16;
+ bdev->split_on_optimal_io_boundary = true;
+
+ rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+
+ /* Now test that a single-vector command is split correctly.
+ * Offset 14, length 8, payload 0xF000
+ * Child - Offset 14, length 2, payload 0xF000
+ * Child - Offset 16, length 6, payload 0xF000 + 2 * 512
+ *
+ * Set up the expected values before calling spdk_bdev_read_blocks
+ */
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
+ ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
+ ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ /* The following children will be submitted sequentially due to the capacity of
+ * spdk_bdev_io.
+ */
+
+ /* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */
+ rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+
+ /* Completing the first read I/O will submit the first child */
+ stub_complete_io(1);
+ CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+
+ /* Completing the first child will submit the second child */
+ stub_complete_io(1);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+
+ /* Complete the second child I/O. This should result in our callback getting
+ * invoked since the parent I/O is now complete.
+ */
+ stub_complete_io(1);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
+
+ /* Now set up a more complex, multi-vector command that needs to be split,
+ * including splitting iovecs.
+ */
+ iov[0].iov_base = (void *)0x10000;
+ iov[0].iov_len = 512;
+ iov[1].iov_base = (void *)0x20000;
+ iov[1].iov_len = 20 * 512;
+ iov[2].iov_base = (void *)0x30000;
+ iov[2].iov_len = 11 * 512;
+
+ g_io_done = false;
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
+ ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
+ ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
+ ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
+ ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
+ ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+
+ /* The following children will be submitted sequentially due to the capacity of
+ * spdk_bdev_io.
+ */
+
+ /* Completing the first child will submit the second child */
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == false);
+
+ /* Completing the second child will submit the third child */
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == false);
+
+ /* Completing the third child will result in our callback getting invoked
+ * since the parent I/O is now complete.
+ */
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == true);
+
+ CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));
+
+ spdk_put_io_channel(io_ch);
+ spdk_bdev_close(desc);
+ free_bdev(bdev);
+ spdk_bdev_finish(bdev_fini_cb, NULL);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("bdev", null_init, null_clean);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "bytes_to_blocks_test", bytes_to_blocks_test) == NULL ||
+ CU_add_test(suite, "num_blocks_test", num_blocks_test) == NULL ||
+ CU_add_test(suite, "io_valid", io_valid_test) == NULL ||
+ CU_add_test(suite, "open_write", open_write_test) == NULL ||
+ CU_add_test(suite, "alias_add_del", alias_add_del_test) == NULL ||
+ CU_add_test(suite, "get_device_stat", get_device_stat_test) == NULL ||
+ CU_add_test(suite, "bdev_io_wait", bdev_io_wait_test) == NULL ||
+ CU_add_test(suite, "bdev_io_spans_boundary", bdev_io_spans_boundary_test) == NULL ||
+ CU_add_test(suite, "bdev_io_split", bdev_io_split) == NULL ||
+ CU_add_test(suite, "bdev_io_split_with_io_wait", bdev_io_split_with_io_wait) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ spdk_allocate_thread(_bdev_send_msg, NULL, NULL, NULL, "thread0");
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ spdk_free_thread();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/bdev/bdev_raid.c/.gitignore b/src/spdk/test/unit/lib/bdev/bdev_raid.c/.gitignore
new file mode 100644
index 00000000..98d1a166
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/bdev_raid.c/.gitignore
@@ -0,0 +1 @@
+bdev_raid_ut
diff --git a/src/spdk/test/unit/lib/bdev/bdev_raid.c/Makefile b/src/spdk/test/unit/lib/bdev/bdev_raid.c/Makefile
new file mode 100644
index 00000000..9739cb44
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/bdev_raid.c/Makefile
@@ -0,0 +1,40 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+
+TEST_FILE = bdev_raid_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/bdev/bdev_raid.c/bdev_raid_ut.c b/src/spdk/test/unit/lib/bdev/bdev_raid.c/bdev_raid_ut.c
new file mode 100644
index 00000000..ffa466da
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/bdev_raid.c/bdev_raid_ut.c
@@ -0,0 +1,2236 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+#include "spdk_cunit.h"
+#include "spdk/env.h"
+#include "spdk_internal/mock.h"
+#include "bdev/raid/bdev_raid.c"
+#include "bdev/raid/bdev_raid_rpc.c"
+
+#define MAX_BASE_DRIVES 255
+#define MAX_RAIDS 31
+#define INVALID_IO_SUBMIT 0xFFFF
+
+/* Data structure to capture the output of IO for verification */
+struct io_output {
+ struct spdk_bdev_desc *desc;
+ struct spdk_io_channel *ch;
+ uint64_t offset_blocks;
+ uint64_t num_blocks;
+ spdk_bdev_io_completion_cb cb;
+ void *cb_arg;
+ enum spdk_bdev_io_type iotype;
+};
+
+/* Pools of test option values; more options to test can be added here */
+uint32_t g_blklen_opts[] = {512, 4096};
+uint32_t g_strip_opts[] = {64, 128, 256, 512, 1024, 2048};
+uint32_t g_iosize_opts[] = {256, 512, 1024};
+uint32_t g_max_qd_opts[] = {64, 128, 256, 512, 1024, 2048};
+
+/* Globals */
+int g_bdev_io_submit_status;
+struct io_output *g_io_output = NULL;
+uint32_t g_io_output_index;
+uint32_t g_io_comp_status;
+bool g_child_io_status_flag;
+void *rpc_req;
+uint32_t rpc_req_size;
+TAILQ_HEAD(bdev, spdk_bdev);
+struct bdev g_bdev_list;
+TAILQ_HEAD(waitq, spdk_bdev_io_wait_entry);
+struct waitq g_io_waitq;
+uint32_t g_block_len;
+uint32_t g_strip_size;
+uint32_t g_max_io_size;
+uint32_t g_max_qd;
+uint8_t g_max_base_drives;
+uint8_t g_max_raids;
+uint8_t g_ignore_io_output;
+uint8_t g_rpc_err;
+char *g_get_raids_output[MAX_RAIDS];
+uint32_t g_get_raids_count;
+uint8_t g_json_beg_res_ret_err;
+uint8_t g_json_decode_obj_err;
+uint8_t g_json_decode_obj_construct;
+uint8_t g_config_level_create = 0;
+uint8_t g_test_multi_raids;
+
+/* Randomize the test options so every run covers a different combination;
+ * the seed is printed so a failing run can be reproduced. */
+static void
+set_test_opts(void)
+{
+ uint32_t seed = time(0);
+
+ /* Generate random test options */
+ srand(seed);
+ g_max_base_drives = (rand() % MAX_BASE_DRIVES) + 1;
+ g_max_raids = (rand() % MAX_RAIDS) + 1;
+ g_block_len = g_blklen_opts[rand() % SPDK_COUNTOF(g_blklen_opts)];
+ g_strip_size = g_strip_opts[rand() % SPDK_COUNTOF(g_strip_opts)];
+ g_max_io_size = g_iosize_opts[rand() % SPDK_COUNTOF(g_iosize_opts)];
+ g_max_qd = g_max_qd_opts[rand() % SPDK_COUNTOF(g_max_qd_opts)];
+
+ printf("Test Options, seed = %u\n", seed);
+ printf("blocklen = %u, strip_size = %u, max_io_size = %u, max_qd = %u, g_max_base_drives = %u, g_max_raids = %u\n",
+ g_block_len, g_strip_size, g_max_io_size, g_max_qd, g_max_base_drives, g_max_raids);
+}
+
+/* Set globals before every test run */
+static void
+set_globals(void)
+{
+ uint32_t max_splits;
+
+ g_bdev_io_submit_status = 0;
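+ /* An I/O that is not strip-aligned can touch one more strip than size / strip_size */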
+ if (g_max_io_size < g_strip_size) {
+ max_splits = 2;
+ } else {
+ max_splits = (g_max_io_size / g_strip_size) + 1;
+ }
+ g_io_output = calloc(max_splits, sizeof(struct io_output));
+ SPDK_CU_ASSERT_FATAL(g_io_output != NULL);
+ g_io_output_index = 0;
+ memset(g_get_raids_output, 0, sizeof(g_get_raids_output));
+ g_get_raids_count = 0;
+ g_io_comp_status = 0;
+ g_ignore_io_output = 0;
+ g_config_level_create = 0;
+ g_rpc_err = 0;
+ g_test_multi_raids = 0;
+ g_child_io_status_flag = true;
+ TAILQ_INIT(&g_bdev_list);
+ TAILQ_INIT(&g_io_waitq);
+ rpc_req = NULL;
+ rpc_req_size = 0;
+ g_json_beg_res_ret_err = 0;
+ g_json_decode_obj_err = 0;
+ g_json_decode_obj_construct = 0;
+}
+
+static void
+base_bdevs_cleanup(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev *bdev_next;
+
+ if (!TAILQ_EMPTY(&g_bdev_list)) {
+ TAILQ_FOREACH_SAFE(bdev, &g_bdev_list, internal.link, bdev_next) {
+ free(bdev->name);
+ TAILQ_REMOVE(&g_bdev_list, bdev, internal.link);
+ free(bdev);
+ }
+ }
+}
+
+static void
+check_and_remove_raid_bdev(struct raid_bdev_config *raid_cfg)
+{
+ struct raid_bdev *raid_bdev;
+
+ /* Get the raid structure, if one was allocated */
+ raid_bdev = raid_cfg->raid_bdev;
+ if (raid_bdev == NULL) {
+ return;
+ }
+
+ for (uint32_t i = 0; i < raid_bdev->num_base_bdevs; i++) {
+ assert(raid_bdev->base_bdev_info != NULL);
+ if (raid_bdev->base_bdev_info[i].bdev) {
+ raid_bdev_free_base_bdev_resource(raid_bdev, i);
+ }
+ }
+ assert(raid_bdev->num_base_bdevs_discovered == 0);
+ raid_bdev_cleanup(raid_bdev);
+}
+
+/* Reset globals */
+static void
+reset_globals(void)
+{
+ if (g_io_output) {
+ free(g_io_output);
+ g_io_output = NULL;
+ }
+ rpc_req = NULL;
+ rpc_req_size = 0;
+}
+
+void
+spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb,
+ uint64_t len)
+{
+ CU_ASSERT(false);
+}
+
+/* Store the I/O completion status in a global variable so tests can verify it */
+void
+spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
+{
+ g_io_comp_status = ((status == SPDK_BDEV_IO_STATUS_SUCCESS) ? true : false);
+}
+
+/* Cache the split I/Os for verification and complete them synchronously */
+int
+spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *iov, int iovcnt,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ struct io_output *p = &g_io_output[g_io_output_index];
+ struct spdk_bdev_io *child_io;
+
+ if (g_ignore_io_output) {
+ return 0;
+ }
+
+ if (g_max_io_size < g_strip_size) {
+ SPDK_CU_ASSERT_FATAL(g_io_output_index < 2);
+ } else {
+ SPDK_CU_ASSERT_FATAL(g_io_output_index < (g_max_io_size / g_strip_size) + 1);
+ }
+ if (g_bdev_io_submit_status == 0) {
+ p->desc = desc;
+ p->ch = ch;
+ p->offset_blocks = offset_blocks;
+ p->num_blocks = num_blocks;
+ p->cb = cb;
+ p->cb_arg = cb_arg;
+ p->iotype = SPDK_BDEV_IO_TYPE_WRITE;
+ g_io_output_index++;
+ child_io = calloc(1, sizeof(struct spdk_bdev_io));
+ SPDK_CU_ASSERT_FATAL(child_io != NULL);
+ cb(child_io, g_child_io_status_flag, cb_arg);
+ }
+
+ return g_bdev_io_submit_status;
+}
+
+int
+spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ return 0;
+}
+
+void
+spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
+{
+ bdev->fn_table->destruct(bdev->ctxt);
+}
+
+int
+spdk_bdev_open(struct spdk_bdev *bdev, bool write, spdk_bdev_remove_cb_t remove_cb,
+ void *remove_ctx, struct spdk_bdev_desc **_desc)
+{
+ *_desc = (void *)0x1;
+ return 0;
+}
+
+void
+spdk_put_io_channel(struct spdk_io_channel *ch)
+{
+ CU_ASSERT(ch == (void *)1);
+}
+
+struct spdk_io_channel *
+spdk_get_io_channel(void *io_device)
+{
+ return NULL;
+}
+
+void
+spdk_poller_unregister(struct spdk_poller **ppoller)
+{
+}
+
+struct spdk_poller *
+spdk_poller_register(spdk_poller_fn fn,
+ void *arg,
+ uint64_t period_microseconds)
+{
+ return (void *)1;
+}
+
+void
+spdk_io_device_unregister(void *io_device, spdk_io_device_unregister_cb unregister_cb)
+{
+}
+
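+/* Stub: ignores the variadic arguments and simply duplicates the format string. */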
+char *
+spdk_sprintf_alloc(const char *format, ...)
+{
+ return strdup(format);
+}
+
+void
+spdk_io_device_register(void *io_device, spdk_io_channel_create_cb create_cb,
+ spdk_io_channel_destroy_cb destroy_cb, uint32_t ctx_size,
+ const char *name)
+{
+}
+
+int
+spdk_json_write_name(struct spdk_json_write_ctx *w, const char *name)
+{
+ return 0;
+}
+
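+/* Verify the values the raid module writes to JSON against the original construct request. */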
+int spdk_json_write_named_uint32(struct spdk_json_write_ctx *w, const char *name, uint32_t val)
+{
+ struct rpc_construct_raid_bdev *req = rpc_req;
+ if (strcmp(name, "strip_size") == 0) {
+ CU_ASSERT(req->strip_size * 1024 / g_block_len == val);
+ } else if (strcmp(name, "blocklen_shift") == 0) {
+ CU_ASSERT(spdk_u32log2(g_block_len) == val);
+ } else if (strcmp(name, "raid_level") == 0) {
+ CU_ASSERT(req->raid_level == val);
+ } else if (strcmp(name, "num_base_bdevs") == 0) {
+ CU_ASSERT(req->base_bdevs.num_base_bdevs == val);
+ } else if (strcmp(name, "state") == 0) {
+ CU_ASSERT(val == RAID_BDEV_STATE_ONLINE);
+ } else if (strcmp(name, "destruct_called") == 0) {
+ CU_ASSERT(val == 0);
+ } else if (strcmp(name, "num_base_bdevs_discovered") == 0) {
+ CU_ASSERT(req->base_bdevs.num_base_bdevs == val);
+ }
+ return 0;
+}
+
+int spdk_json_write_named_string(struct spdk_json_write_ctx *w, const char *name, const char *val)
+{
+ return 0;
+}
+
+int
+spdk_json_write_object_begin(struct spdk_json_write_ctx *w)
+{
+ return 0;
+}
+
+int
+spdk_json_write_named_object_begin(struct spdk_json_write_ctx *w, const char *name)
+{
+ return 0;
+}
+
+int
+spdk_json_write_named_array_begin(struct spdk_json_write_ctx *w, const char *name)
+{
+ return 0;
+}
+
+int
+spdk_json_write_array_end(struct spdk_json_write_ctx *w)
+{
+ return 0;
+}
+
+int
+spdk_json_write_object_end(struct spdk_json_write_ctx *w)
+{
+ return 0;
+}
+
+int
+spdk_json_write_bool(struct spdk_json_write_ctx *w, bool val)
+{
+ return 0;
+}
+
+int spdk_json_write_null(struct spdk_json_write_ctx *w)
+{
+ return 0;
+}
+
+struct spdk_io_channel *
+spdk_bdev_get_io_channel(struct spdk_bdev_desc *desc)
+{
+ return (void *)1;
+}
+
+void
+spdk_for_each_thread(spdk_thread_fn fn, void *ctx, spdk_thread_fn cpl)
+{
+ fn(ctx);
+ cpl(ctx);
+}
+
+struct spdk_thread *
+spdk_get_thread(void)
+{
+ return NULL;
+}
+
+void
+spdk_thread_send_msg(const struct spdk_thread *thread, spdk_thread_fn fn, void *ctx)
+{
+ fn(ctx);
+}
+
+uint32_t
+spdk_env_get_current_core(void)
+{
+ return 0;
+}
+
+void
+spdk_bdev_free_io(struct spdk_bdev_io *bdev_io)
+{
+ if (bdev_io) {
+ free(bdev_io);
+ }
+}
+
+/* Cache the split I/Os for verification and complete them synchronously */
+int
+spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *iov, int iovcnt,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ struct io_output *p = &g_io_output[g_io_output_index];
+ struct spdk_bdev_io *child_io;
+
+ if (g_ignore_io_output) {
+ return 0;
+ }
+
+ SPDK_CU_ASSERT_FATAL(g_io_output_index <= (g_max_io_size / g_strip_size) + 1);
+ if (g_bdev_io_submit_status == 0) {
+ p->desc = desc;
+ p->ch = ch;
+ p->offset_blocks = offset_blocks;
+ p->num_blocks = num_blocks;
+ p->cb = cb;
+ p->cb_arg = cb_arg;
+ p->iotype = SPDK_BDEV_IO_TYPE_READ;
+ g_io_output_index++;
+ child_io = calloc(1, sizeof(struct spdk_bdev_io));
+ SPDK_CU_ASSERT_FATAL(child_io != NULL);
+ cb(child_io, g_child_io_status_flag, cb_arg);
+ }
+
+ return g_bdev_io_submit_status;
+}
+
+void
+spdk_bdev_module_release_bdev(struct spdk_bdev *bdev)
+{
+ CU_ASSERT(bdev->internal.claim_module != NULL);
+ bdev->internal.claim_module = NULL;
+}
+
+void
+spdk_bdev_module_examine_done(struct spdk_bdev_module *module)
+{
+}
+
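+/* Config-file stubs: when g_config_level_create is set, they serve values from
+ * the current rpc_req so that raid_bdev_init() exercises the .conf parsing path.
+ */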
+struct spdk_conf_section *
+spdk_conf_first_section(struct spdk_conf *cp)
+{
+ if (g_config_level_create) {
+ return (void *) 0x1;
+ }
+
+ return NULL;
+}
+
+bool
+spdk_conf_section_match_prefix(const struct spdk_conf_section *sp, const char *name_prefix)
+{
+ if (g_config_level_create) {
+ return true;
+ }
+
+ return false;
+}
+
+char *
+spdk_conf_section_get_val(struct spdk_conf_section *sp, const char *key)
+{
+ struct rpc_construct_raid_bdev *req = rpc_req;
+
+ if (g_config_level_create) {
+ if (strcmp(key, "Name") == 0) {
+ return req->name;
+ }
+ }
+
+ return NULL;
+}
+
+int
+spdk_conf_section_get_intval(struct spdk_conf_section *sp, const char *key)
+{
+ struct rpc_construct_raid_bdev *req = rpc_req;
+
+ if (g_config_level_create) {
+ if (strcmp(key, "StripSize") == 0) {
+ return req->strip_size;
+ } else if (strcmp(key, "NumDevices") == 0) {
+ return req->base_bdevs.num_base_bdevs;
+ } else if (strcmp(key, "RaidLevel") == 0) {
+ return req->raid_level;
+ }
+ }
+
+ return 0;
+}
+
+struct spdk_conf_section *
+spdk_conf_next_section(struct spdk_conf_section *sp)
+{
+ return NULL;
+}
+
+char *
+spdk_conf_section_get_nmval(struct spdk_conf_section *sp, const char *key, int idx1, int idx2)
+{
+ struct rpc_construct_raid_bdev *req = rpc_req;
+
+ if (g_config_level_create) {
+ if (strcmp(key, "Devices") == 0) {
+ if (idx2 >= g_max_base_drives) {
+ return NULL;
+ }
+ return req->base_bdevs.base_bdevs[idx2];
+ }
+ }
+
+ return NULL;
+}
+
+void
+spdk_bdev_close(struct spdk_bdev_desc *desc)
+{
+}
+
+int
+spdk_bdev_module_claim_bdev(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
+ struct spdk_bdev_module *module)
+{
+ if (bdev->internal.claim_module != NULL) {
+ return -1;
+ }
+ bdev->internal.claim_module = module;
+ return 0;
+}
+
+int
+spdk_bdev_register(struct spdk_bdev *bdev)
+{
+ return 0;
+}
+
+uint32_t
+spdk_env_get_last_core(void)
+{
+ return 0;
+}
+
+int
+spdk_json_decode_string(const struct spdk_json_val *val, void *out)
+{
+ return 0;
+}
+
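+/* Decode in one of two modes: construct requests get a deep copy of rpc_req
+ * (the name and base bdev names are strdup'd so the RPC handler may free them);
+ * all other requests are copied flat with memcpy. g_json_decode_obj_err
+ * simulates a decode failure.
+ */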
+int
+spdk_json_decode_object(const struct spdk_json_val *values,
+ const struct spdk_json_object_decoder *decoders, size_t num_decoders, void *out)
+{
+ struct rpc_construct_raid_bdev *req, *_out;
+ size_t i;
+
+ if (g_json_decode_obj_err) {
+ return -1;
+ } else if (g_json_decode_obj_construct) {
+ req = rpc_req;
+ _out = out;
+
+ _out->name = strdup(req->name);
+ SPDK_CU_ASSERT_FATAL(_out->name != NULL);
+ _out->strip_size = req->strip_size;
+ _out->raid_level = req->raid_level;
+ _out->base_bdevs.num_base_bdevs = req->base_bdevs.num_base_bdevs;
+ for (i = 0; i < req->base_bdevs.num_base_bdevs; i++) {
+ _out->base_bdevs.base_bdevs[i] = strdup(req->base_bdevs.base_bdevs[i]);
+ SPDK_CU_ASSERT_FATAL(_out->base_bdevs.base_bdevs[i] != NULL);
+ }
+ } else {
+ memcpy(out, rpc_req, rpc_req_size);
+ }
+
+ return 0;
+}
+
+struct spdk_json_write_ctx *
+spdk_jsonrpc_begin_result(struct spdk_jsonrpc_request *request)
+{
+ if (g_json_beg_res_ret_err) {
+ return NULL;
+ } else {
+ return (void *)1;
+ }
+}
+
+int
+spdk_json_write_array_begin(struct spdk_json_write_ctx *w)
+{
+ return 0;
+}
+
+int
+spdk_json_write_string(struct spdk_json_write_ctx *w, const char *val)
+{
+ if (g_test_multi_raids) {
+ g_get_raids_output[g_get_raids_count] = strdup(val);
+ SPDK_CU_ASSERT_FATAL(g_get_raids_output[g_get_raids_count] != NULL);
+ g_get_raids_count++;
+ }
+
+ return 0;
+}
+
+void
+spdk_jsonrpc_send_error_response(struct spdk_jsonrpc_request *request,
+ int error_code, const char *msg)
+{
+ g_rpc_err = 1;
+}
+
+void
+spdk_jsonrpc_send_error_response_fmt(struct spdk_jsonrpc_request *request,
+ int error_code, const char *fmt, ...)
+{
+ g_rpc_err = 1;
+}
+
+void
+spdk_jsonrpc_end_result(struct spdk_jsonrpc_request *request, struct spdk_json_write_ctx *w)
+{
+}
+
+struct spdk_bdev *
+spdk_bdev_get_by_name(const char *bdev_name)
+{
+ struct spdk_bdev *bdev;
+
+ if (!TAILQ_EMPTY(&g_bdev_list)) {
+ TAILQ_FOREACH(bdev, &g_bdev_list, internal.link) {
+ if (strcmp(bdev_name, bdev->name) == 0) {
+ return bdev;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+const char *
+spdk_strerror(int errnum)
+{
+ return NULL;
+}
+
+int
+spdk_json_decode_array(const struct spdk_json_val *values, spdk_json_decode_fn decode_func,
+ void *out, size_t max_size, size_t *out_size, size_t stride)
+{
+ return 0;
+}
+
+void
+spdk_rpc_register_method(const char *method, spdk_rpc_method_handler func, uint32_t state_mask)
+{
+}
+
+int
+spdk_json_decode_uint32(const struct spdk_json_val *val, void *out)
+{
+ return 0;
+}
+
+void
+spdk_bdev_module_list_add(struct spdk_bdev_module *bdev_module)
+{
+}
+
+static void
+bdev_io_cleanup(struct spdk_bdev_io *bdev_io)
+{
+ if (bdev_io->u.bdev.iovs) {
+ if (bdev_io->u.bdev.iovs->iov_base) {
+ free(bdev_io->u.bdev.iovs->iov_base);
+ bdev_io->u.bdev.iovs->iov_base = NULL;
+ }
+ free(bdev_io->u.bdev.iovs);
+ bdev_io->u.bdev.iovs = NULL;
+ }
+}
+
+static void
+bdev_io_initialize(struct spdk_bdev_io *bdev_io, struct spdk_bdev *bdev,
+ uint64_t lba, uint64_t blocks, int16_t iotype)
+{
+ bdev_io->bdev = bdev;
+ bdev_io->u.bdev.offset_blocks = lba;
+ bdev_io->u.bdev.num_blocks = blocks;
+ bdev_io->type = iotype;
+ bdev_io->u.bdev.iovcnt = 1;
+ bdev_io->u.bdev.iovs = calloc(1, sizeof(struct iovec));
+ SPDK_CU_ASSERT_FATAL(bdev_io->u.bdev.iovs != NULL);
+ bdev_io->u.bdev.iovs->iov_base = calloc(1, bdev_io->u.bdev.num_blocks * g_block_len);
+ SPDK_CU_ASSERT_FATAL(bdev_io->u.bdev.iovs->iov_base != NULL);
+ bdev_io->u.bdev.iovs->iov_len = bdev_io->u.bdev.num_blocks * g_block_len;
+}
+
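+/* Verify that a parent IO was split into one child IO per strip it touches and
+ * that each child landed on the expected base bdev at the expected LBA. For
+ * example, with g_strip_size = 64, an 8-block IO at offset_blocks 60 covers
+ * strips 0 and 1 and must produce two 4-block child IOs: one at LBA 60 inside
+ * strip 0 and one at LBA 0 of strip 1, with strips mapped round-robin across
+ * the base bdevs.
+ */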
+static void
+verify_io(struct spdk_bdev_io *bdev_io, uint8_t num_base_drives,
+ struct raid_bdev_io_channel *ch_ctx, struct raid_bdev *raid_bdev, uint32_t io_status)
+{
+ uint32_t strip_shift = spdk_u32log2(g_strip_size);
+ uint64_t start_strip = bdev_io->u.bdev.offset_blocks >> strip_shift;
+ uint64_t end_strip = (bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) >>
+ strip_shift;
+ uint32_t splits_reqd = (end_strip - start_strip + 1);
+ uint32_t strip;
+ uint64_t pd_strip;
+ uint64_t pd_idx;
+ uint32_t offset_in_strip;
+ uint64_t pd_lba;
+ uint64_t pd_blocks;
+ uint32_t index = 0;
+ uint8_t *buf = bdev_io->u.bdev.iovs->iov_base;
+
+ if (io_status == INVALID_IO_SUBMIT) {
+ CU_ASSERT(g_io_comp_status == false);
+ return;
+ }
+ SPDK_CU_ASSERT_FATAL(raid_bdev != NULL);
+ SPDK_CU_ASSERT_FATAL(num_base_drives != 0);
+
+ CU_ASSERT(splits_reqd == g_io_output_index);
+ for (strip = start_strip; strip <= end_strip; strip++, index++) {
+ pd_strip = strip / num_base_drives;
+ pd_idx = strip % num_base_drives;
+ if (strip == start_strip) {
+ offset_in_strip = bdev_io->u.bdev.offset_blocks & (g_strip_size - 1);
+ pd_lba = (pd_strip << strip_shift) + offset_in_strip;
+ if (strip == end_strip) {
+ pd_blocks = bdev_io->u.bdev.num_blocks;
+ } else {
+ pd_blocks = g_strip_size - offset_in_strip;
+ }
+ } else if (strip == end_strip) {
+ pd_lba = pd_strip << strip_shift;
+ pd_blocks = ((bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) &
+ (g_strip_size - 1)) + 1;
+ } else {
+ pd_lba = pd_strip << raid_bdev->strip_size_shift;
+ pd_blocks = raid_bdev->strip_size;
+ }
+ CU_ASSERT(pd_lba == g_io_output[index].offset_blocks);
+ CU_ASSERT(pd_blocks == g_io_output[index].num_blocks);
+ CU_ASSERT(ch_ctx->base_channel[pd_idx] == g_io_output[index].ch);
+ CU_ASSERT(raid_bdev->base_bdev_info[pd_idx].desc == g_io_output[index].desc);
+ CU_ASSERT(bdev_io->type == g_io_output[index].iotype);
+ buf += (pd_blocks << spdk_u32log2(g_block_len));
+ }
+ CU_ASSERT(g_io_comp_status == io_status);
+}
+
+static void
+verify_raid_config_present(const char *name, bool presence)
+{
+ struct raid_bdev_config *raid_cfg;
+ bool cfg_found;
+
+ cfg_found = false;
+
+ TAILQ_FOREACH(raid_cfg, &g_spdk_raid_config.raid_bdev_config_head, link) {
+ if (raid_cfg->name != NULL) {
+ if (strcmp(name, raid_cfg->name) == 0) {
+ cfg_found = true;
+ break;
+ }
+ }
+ }
+
+ if (presence == true) {
+ CU_ASSERT(cfg_found == true);
+ } else {
+ CU_ASSERT(cfg_found == false);
+ }
+}
+
+static void
+verify_raid_bdev_present(const char *name, bool presence)
+{
+ struct raid_bdev *pbdev;
+ bool pbdev_found;
+
+ pbdev_found = false;
+ TAILQ_FOREACH(pbdev, &g_spdk_raid_bdev_list, global_link) {
+ if (strcmp(pbdev->bdev.name, name) == 0) {
+ pbdev_found = true;
+ break;
+ }
+ }
+ if (presence == true) {
+ CU_ASSERT(pbdev_found == true);
+ } else {
+ CU_ASSERT(pbdev_found == false);
+ }
+}
+
+static void
+verify_raid_config(struct rpc_construct_raid_bdev *r, bool presence)
+{
+ struct raid_bdev_config *raid_cfg = NULL;
+ uint32_t i;
+ int val;
+
+ TAILQ_FOREACH(raid_cfg, &g_spdk_raid_config.raid_bdev_config_head, link) {
+ if (strcmp(r->name, raid_cfg->name) == 0) {
+ if (presence == false) {
+ break;
+ }
+ CU_ASSERT(raid_cfg->raid_bdev != NULL);
+ CU_ASSERT(raid_cfg->strip_size == r->strip_size);
+ CU_ASSERT(raid_cfg->num_base_bdevs == r->base_bdevs.num_base_bdevs);
+ CU_ASSERT(raid_cfg->raid_level == r->raid_level);
+ if (raid_cfg->base_bdev != NULL) {
+ for (i = 0; i < raid_cfg->num_base_bdevs; i++) {
+ val = strcmp(raid_cfg->base_bdev[i].name, r->base_bdevs.base_bdevs[i]);
+ CU_ASSERT(val == 0);
+ }
+ }
+ break;
+ }
+ }
+
+ if (presence == true) {
+ CU_ASSERT(raid_cfg != NULL);
+ } else {
+ CU_ASSERT(raid_cfg == NULL);
+ }
+}
+
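+/* Validate the constructed raid bdev against the request, including the derived
+ * block count: blockcnt = (min_blockcnt / strip_blocks) * strip_blocks *
+ * num_base_bdevs, where strip_blocks is the strip size in blocks and
+ * min_blockcnt is the smallest base bdev's block count.
+ */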
+static void
+verify_raid_bdev(struct rpc_construct_raid_bdev *r, bool presence, uint32_t raid_state)
+{
+ struct raid_bdev *pbdev;
+ uint32_t i;
+ struct spdk_bdev *bdev = NULL;
+ bool pbdev_found;
+ uint64_t min_blockcnt = UINT64_MAX;
+
+ pbdev_found = false;
+ TAILQ_FOREACH(pbdev, &g_spdk_raid_bdev_list, global_link) {
+ if (strcmp(pbdev->bdev.name, r->name) == 0) {
+ pbdev_found = true;
+ if (presence == false) {
+ break;
+ }
+ CU_ASSERT(pbdev->config->raid_bdev == pbdev);
+ CU_ASSERT(pbdev->base_bdev_info != NULL);
+ CU_ASSERT(pbdev->strip_size == ((r->strip_size * 1024) / g_block_len));
+ CU_ASSERT(pbdev->strip_size_shift == spdk_u32log2(((r->strip_size * 1024) / g_block_len)));
+ CU_ASSERT(pbdev->blocklen_shift == spdk_u32log2(g_block_len));
+ CU_ASSERT(pbdev->state == raid_state);
+ CU_ASSERT(pbdev->num_base_bdevs == r->base_bdevs.num_base_bdevs);
+ CU_ASSERT(pbdev->num_base_bdevs_discovered == r->base_bdevs.num_base_bdevs);
+ CU_ASSERT(pbdev->raid_level == r->raid_level);
+ CU_ASSERT(pbdev->destruct_called == false);
+ for (i = 0; i < pbdev->num_base_bdevs; i++) {
+ if (pbdev->base_bdev_info && pbdev->base_bdev_info[i].bdev) {
+ bdev = spdk_bdev_get_by_name(pbdev->base_bdev_info[i].bdev->name);
+ CU_ASSERT(bdev != NULL);
+ CU_ASSERT(pbdev->base_bdev_info[i].remove_scheduled == false);
+ } else {
+ CU_ASSERT(0);
+ }
+
+ if (bdev && bdev->blockcnt < min_blockcnt) {
+ min_blockcnt = bdev->blockcnt;
+ }
+ }
+ CU_ASSERT((((min_blockcnt / (r->strip_size * 1024 / g_block_len)) * (r->strip_size * 1024 /
+ g_block_len)) * r->base_bdevs.num_base_bdevs) == pbdev->bdev.blockcnt);
+ CU_ASSERT(strcmp(pbdev->bdev.product_name, "Pooled Device") == 0);
+ CU_ASSERT(pbdev->bdev.write_cache == 0);
+ CU_ASSERT(pbdev->bdev.blocklen == g_block_len);
+ if (pbdev->num_base_bdevs > 1) {
+ CU_ASSERT(pbdev->bdev.optimal_io_boundary == pbdev->strip_size);
+ CU_ASSERT(pbdev->bdev.split_on_optimal_io_boundary == true);
+ } else {
+ CU_ASSERT(pbdev->bdev.optimal_io_boundary == 0);
+ CU_ASSERT(pbdev->bdev.split_on_optimal_io_boundary == false);
+ }
+ CU_ASSERT(pbdev->bdev.ctxt == pbdev);
+ CU_ASSERT(pbdev->bdev.fn_table == &g_raid_bdev_fn_table);
+ CU_ASSERT(pbdev->bdev.module == &g_raid_if);
+ break;
+ }
+ }
+ if (presence == true) {
+ CU_ASSERT(pbdev_found == true);
+ } else {
+ CU_ASSERT(pbdev_found == false);
+ }
+ pbdev_found = false;
+ if (raid_state == RAID_BDEV_STATE_ONLINE) {
+ TAILQ_FOREACH(pbdev, &g_spdk_raid_bdev_configured_list, state_link) {
+ if (strcmp(pbdev->bdev.name, r->name) == 0) {
+ pbdev_found = true;
+ break;
+ }
+ }
+ } else if (raid_state == RAID_BDEV_STATE_CONFIGURING) {
+ TAILQ_FOREACH(pbdev, &g_spdk_raid_bdev_configuring_list, state_link) {
+ if (strcmp(pbdev->bdev.name, r->name) == 0) {
+ pbdev_found = true;
+ break;
+ }
+ }
+ } else if (raid_state == RAID_BDEV_STATE_OFFLINE) {
+ TAILQ_FOREACH(pbdev, &g_spdk_raid_bdev_offline_list, state_link) {
+ if (strcmp(pbdev->bdev.name, r->name) == 0) {
+ pbdev_found = true;
+ break;
+ }
+ }
+ }
+ if (presence == true) {
+ CU_ASSERT(pbdev_found == true);
+ } else {
+ CU_ASSERT(pbdev_found == false);
+ }
+}
+
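+/* Park the wait entry on a local queue; process_io_waitq() later replays the
+ * callbacks, mimicking the bdev layer's -ENOMEM retry path.
+ */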
+int
+spdk_bdev_queue_io_wait(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
+ struct spdk_bdev_io_wait_entry *entry)
+{
+ CU_ASSERT(bdev == entry->bdev);
+ CU_ASSERT(entry->cb_fn != NULL);
+ CU_ASSERT(entry->cb_arg != NULL);
+ TAILQ_INSERT_TAIL(&g_io_waitq, entry, link);
+ return 0;
+}
+
+static uint32_t
+get_num_elts_in_waitq(void)
+{
+ struct spdk_bdev_io_wait_entry *ele;
+ uint32_t count = 0;
+
+ TAILQ_FOREACH(ele, &g_io_waitq, link) {
+ count++;
+ }
+
+ return count;
+}
+
+static void
+process_io_waitq(void)
+{
+ struct spdk_bdev_io_wait_entry *ele;
+ struct spdk_bdev_io_wait_entry *next_ele;
+
+ TAILQ_FOREACH_SAFE(ele, &g_io_waitq, link, next_ele) {
+ TAILQ_REMOVE(&g_io_waitq, ele, link);
+ ele->cb_fn(ele->cb_arg);
+ }
+}
+
+static void
+verify_get_raids(struct rpc_construct_raid_bdev *construct_req,
+ uint8_t num_raids,
+ char **raids_output, uint32_t raids_count)
+{
+ uint32_t i, j;
+ bool found;
+
+ CU_ASSERT(num_raids == raids_count);
+ if (num_raids == raids_count) {
+ for (i = 0; i < num_raids; i++) {
+ found = false;
+ for (j = 0; j < num_raids; j++) {
+ if (construct_req[i].name && strcmp(construct_req[i].name, raids_output[j]) == 0) {
+ found = true;
+ break;
+ }
+ }
+ CU_ASSERT(found == true);
+ }
+ }
+}
+
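+/* Create g_max_base_drives fake base bdevs named Nvme<idx>n1 and add them to
+ * the global bdev list, each with block length g_block_len and a large fixed
+ * block count.
+ */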
+static void
+create_base_bdevs(uint32_t bbdev_start_idx)
+{
+ uint32_t i;
+ struct spdk_bdev *base_bdev;
+ char name[16];
+ uint16_t num_chars;
+
+ for (i = 0; i < g_max_base_drives; i++, bbdev_start_idx++) {
+ num_chars = snprintf(name, 16, "%s%u%s", "Nvme", bbdev_start_idx, "n1");
+ name[num_chars] = '\0';
+ base_bdev = calloc(1, sizeof(struct spdk_bdev));
+ SPDK_CU_ASSERT_FATAL(base_bdev != NULL);
+ base_bdev->name = strdup(name);
+ SPDK_CU_ASSERT_FATAL(base_bdev->name != NULL);
+ base_bdev->blocklen = g_block_len;
+ base_bdev->blockcnt = (uint64_t)1024 * 1024 * 1024 * 1024;
+ TAILQ_INSERT_TAIL(&g_bdev_list, base_bdev, internal.link);
+ }
+}
+
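+/* Build a RAID level 0 construct request spanning g_max_base_drives base bdevs
+ * starting at Nvme<bbdev_start_idx>n1, optionally creating those base bdevs too.
+ */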
+static void
+create_test_req(struct rpc_construct_raid_bdev *r, const char *raid_name, uint32_t bbdev_start_idx,
+ bool create_base_bdev)
+{
+ uint32_t i;
+ char name[16];
+ uint16_t num_chars;
+ uint32_t bbdev_idx = bbdev_start_idx;
+
+ r->name = strdup(raid_name);
+ SPDK_CU_ASSERT_FATAL(r->name != NULL);
+ r->strip_size = (g_strip_size * g_block_len) / 1024;
+ r->raid_level = 0;
+ r->base_bdevs.num_base_bdevs = g_max_base_drives;
+ for (i = 0; i < g_max_base_drives; i++, bbdev_idx++) {
+ num_chars = snprintf(name, 16, "%s%u%s", "Nvme", bbdev_idx, "n1");
+ name[num_chars] = '\0';
+ r->base_bdevs.base_bdevs[i] = strdup(name);
+ SPDK_CU_ASSERT_FATAL(r->base_bdevs.base_bdevs[i] != NULL);
+ }
+ if (create_base_bdev == true) {
+ create_base_bdevs(bbdev_start_idx);
+ }
+}
+
+static void
+free_test_req(struct rpc_construct_raid_bdev *r)
+{
+ uint8_t i;
+
+ free(r->name);
+ for (i = 0; i < r->base_bdevs.num_base_bdevs; i++) {
+ free(r->base_bdevs.base_bdevs[i]);
+ }
+}
+
+static void
+test_construct_raid(void)
+{
+ struct rpc_construct_raid_bdev req;
+ struct rpc_destroy_raid_bdev destroy_req;
+
+ set_globals();
+ create_test_req(&req, "raid1", 0, true);
+ rpc_req = &req;
+ rpc_req_size = sizeof(req);
+ CU_ASSERT(raid_bdev_init() == 0);
+
+ verify_raid_config_present(req.name, false);
+ verify_raid_bdev_present(req.name, false);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 1;
+ spdk_rpc_construct_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config(&req, true);
+ verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
+ free_test_req(&req);
+
+ destroy_req.name = strdup("raid1");
+ rpc_req = &destroy_req;
+ rpc_req_size = sizeof(destroy_req);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 0;
+ spdk_rpc_destroy_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ raid_bdev_exit();
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+static void
+test_destroy_raid(void)
+{
+ struct rpc_construct_raid_bdev construct_req;
+ struct rpc_destroy_raid_bdev destroy_req;
+
+ set_globals();
+ create_test_req(&construct_req, "raid1", 0, true);
+ rpc_req = &construct_req;
+ rpc_req_size = sizeof(construct_req);
+ CU_ASSERT(raid_bdev_init() == 0);
+ verify_raid_config_present(construct_req.name, false);
+ verify_raid_bdev_present(construct_req.name, false);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 1;
+ spdk_rpc_construct_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config(&construct_req, true);
+ verify_raid_bdev(&construct_req, true, RAID_BDEV_STATE_ONLINE);
+ free_test_req(&construct_req);
+
+ destroy_req.name = strdup("raid1");
+ rpc_req = &destroy_req;
+ rpc_req_size = sizeof(destroy_req);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 0;
+ spdk_rpc_destroy_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ raid_bdev_exit();
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+static void
+test_construct_raid_invalid_args(void)
+{
+ struct rpc_construct_raid_bdev req;
+ struct rpc_destroy_raid_bdev destroy_req;
+ struct raid_bdev_config *raid_cfg;
+
+ set_globals();
+ rpc_req = &req;
+ rpc_req_size = sizeof(req);
+ CU_ASSERT(raid_bdev_init() == 0);
+
+ create_test_req(&req, "raid1", 0, true);
+ verify_raid_config_present(req.name, false);
+ verify_raid_bdev_present(req.name, false);
+ req.raid_level = 1;
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 1;
+ spdk_rpc_construct_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 1);
+ free_test_req(&req);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ create_test_req(&req, "raid1", 0, false);
+ verify_raid_config_present(req.name, false);
+ verify_raid_bdev_present(req.name, false);
+ g_rpc_err = 0;
+ g_json_decode_obj_err = 1;
+ g_json_decode_obj_construct = 1;
+ spdk_rpc_construct_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 1);
+ g_json_decode_obj_err = 0;
+ free_test_req(&req);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ create_test_req(&req, "raid1", 0, false);
+ req.strip_size = 1231;
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 1;
+ spdk_rpc_construct_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 1);
+ free_test_req(&req);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ create_test_req(&req, "raid1", 0, false);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 1;
+ spdk_rpc_construct_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config(&req, true);
+ verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
+ free_test_req(&req);
+
+ create_test_req(&req, "raid1", 0, false);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 1;
+ spdk_rpc_construct_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 1);
+ free_test_req(&req);
+
+ create_test_req(&req, "raid2", 0, false);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 1;
+ spdk_rpc_construct_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 1);
+ free_test_req(&req);
+ verify_raid_config_present("raid2", false);
+ verify_raid_bdev_present("raid2", false);
+
+ create_test_req(&req, "raid2", g_max_base_drives, true);
+ free(req.base_bdevs.base_bdevs[g_max_base_drives - 1]);
+ req.base_bdevs.base_bdevs[g_max_base_drives - 1] = strdup("Nvme0n1");
+ SPDK_CU_ASSERT_FATAL(req.base_bdevs.base_bdevs[g_max_base_drives - 1] != NULL);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 1;
+ spdk_rpc_construct_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 1);
+ free_test_req(&req);
+ verify_raid_config_present("raid2", false);
+ verify_raid_bdev_present("raid2", false);
+
+ create_test_req(&req, "raid2", g_max_base_drives, true);
+ free(req.base_bdevs.base_bdevs[g_max_base_drives - 1]);
+ req.base_bdevs.base_bdevs[g_max_base_drives - 1] = strdup("Nvme100000n1");
+ SPDK_CU_ASSERT_FATAL(req.base_bdevs.base_bdevs[g_max_base_drives - 1] != NULL);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 1;
+ spdk_rpc_construct_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ free_test_req(&req);
+ verify_raid_config_present("raid2", true);
+ verify_raid_bdev_present("raid2", true);
+ raid_cfg = raid_bdev_config_find_by_name("raid2");
+ SPDK_CU_ASSERT_FATAL(raid_cfg != NULL);
+ check_and_remove_raid_bdev(raid_cfg);
+ raid_bdev_config_cleanup(raid_cfg);
+
+ create_test_req(&req, "raid2", g_max_base_drives, false);
+ g_rpc_err = 0;
+ g_json_beg_res_ret_err = 1;
+ g_json_decode_obj_construct = 1;
+ spdk_rpc_construct_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ free_test_req(&req);
+ verify_raid_config_present("raid2", true);
+ verify_raid_bdev_present("raid2", true);
+ verify_raid_config_present("raid1", true);
+ verify_raid_bdev_present("raid1", true);
+ g_json_beg_res_ret_err = 0;
+
+ destroy_req.name = strdup("raid1");
+ rpc_req = &destroy_req;
+ rpc_req_size = sizeof(destroy_req);
+ g_json_decode_obj_construct = 0;
+ spdk_rpc_destroy_raid_bdev(NULL, NULL);
+ destroy_req.name = strdup("raid2");
+ rpc_req = &destroy_req;
+ rpc_req_size = sizeof(destroy_req);
+ g_json_decode_obj_construct = 0;
+ spdk_rpc_destroy_raid_bdev(NULL, NULL);
+ raid_bdev_exit();
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+static void
+test_destroy_raid_invalid_args(void)
+{
+ struct rpc_construct_raid_bdev construct_req;
+ struct rpc_destroy_raid_bdev destroy_req;
+
+ set_globals();
+ create_test_req(&construct_req, "raid1", 0, true);
+ rpc_req = &construct_req;
+ rpc_req_size = sizeof(construct_req);
+ CU_ASSERT(raid_bdev_init() == 0);
+ verify_raid_config_present(construct_req.name, false);
+ verify_raid_bdev_present(construct_req.name, false);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 1;
+ spdk_rpc_construct_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config(&construct_req, true);
+ verify_raid_bdev(&construct_req, true, RAID_BDEV_STATE_ONLINE);
+ free_test_req(&construct_req);
+
+ destroy_req.name = strdup("raid2");
+ rpc_req = &destroy_req;
+ rpc_req_size = sizeof(destroy_req);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 0;
+ spdk_rpc_destroy_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 1);
+
+ destroy_req.name = strdup("raid1");
+ g_rpc_err = 0;
+ g_json_decode_obj_err = 1;
+ g_json_decode_obj_construct = 0;
+ spdk_rpc_destroy_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 1);
+ g_json_decode_obj_err = 0;
+ g_rpc_err = 0;
+ free(destroy_req.name);
+ verify_raid_config_present("raid1", true);
+ verify_raid_bdev_present("raid1", true);
+
+ destroy_req.name = strdup("raid1");
+ rpc_req = &destroy_req;
+ rpc_req_size = sizeof(destroy_req);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 0;
+ spdk_rpc_destroy_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ raid_bdev_exit();
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+static void
+test_io_channel(void)
+{
+ struct rpc_construct_raid_bdev req;
+ struct rpc_destroy_raid_bdev destroy_req;
+ struct raid_bdev *pbdev;
+ struct raid_bdev_io_channel *ch_ctx;
+ uint32_t i;
+
+ set_globals();
+ create_test_req(&req, "raid1", 0, true);
+ rpc_req = &req;
+ rpc_req_size = sizeof(req);
+ CU_ASSERT(raid_bdev_init() == 0);
+
+ verify_raid_config_present(req.name, false);
+ verify_raid_bdev_present(req.name, false);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 1;
+ spdk_rpc_construct_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config(&req, true);
+ verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
+
+ TAILQ_FOREACH(pbdev, &g_spdk_raid_bdev_list, global_link) {
+ if (strcmp(pbdev->bdev.name, req.name) == 0) {
+ break;
+ }
+ }
+ SPDK_CU_ASSERT_FATAL(pbdev != NULL);
+ ch_ctx = calloc(1, sizeof(struct raid_bdev_io_channel));
+ SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
+
+ CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) == 0);
+ for (i = 0; i < req.base_bdevs.num_base_bdevs; i++) {
+ CU_ASSERT(ch_ctx->base_channel && ch_ctx->base_channel[i] == (void *)0x1);
+ }
+ raid_bdev_destroy_cb(pbdev, ch_ctx);
+ CU_ASSERT(ch_ctx->base_channel == NULL);
+ free_test_req(&req);
+
+ destroy_req.name = strdup("raid1");
+ rpc_req = &destroy_req;
+ rpc_req_size = sizeof(destroy_req);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 0;
+ spdk_rpc_destroy_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ free(ch_ctx);
+ raid_bdev_exit();
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+static void
+test_write_io(void)
+{
+ struct rpc_construct_raid_bdev req;
+ struct rpc_destroy_raid_bdev destroy_req;
+ struct raid_bdev *pbdev;
+ struct spdk_io_channel *ch;
+ struct raid_bdev_io_channel *ch_ctx;
+ uint32_t i;
+ struct spdk_bdev_io *bdev_io;
+ uint32_t count;
+ uint64_t io_len;
+ uint64_t lba;
+
+ set_globals();
+ create_test_req(&req, "raid1", 0, true);
+ rpc_req = &req;
+ rpc_req_size = sizeof(req);
+ CU_ASSERT(raid_bdev_init() == 0);
+ verify_raid_config_present(req.name, false);
+ verify_raid_bdev_present(req.name, false);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 1;
+ spdk_rpc_construct_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config(&req, true);
+ verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
+ TAILQ_FOREACH(pbdev, &g_spdk_raid_bdev_list, global_link) {
+ if (strcmp(pbdev->bdev.name, req.name) == 0) {
+ break;
+ }
+ }
+ SPDK_CU_ASSERT_FATAL(pbdev != NULL);
+ ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct raid_bdev_io_channel));
+ SPDK_CU_ASSERT_FATAL(ch != NULL);
+ ch_ctx = spdk_io_channel_get_ctx(ch);
+ SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
+
+ CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) == 0);
+ for (i = 0; i < req.base_bdevs.num_base_bdevs; i++) {
+ CU_ASSERT(ch_ctx->base_channel && ch_ctx->base_channel[i] == (void *)0x1);
+ }
+
+ lba = 0;
+ for (count = 0; count < g_max_qd; count++) {
+ bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+ io_len = (rand() % g_strip_size) + 1;
+ bdev_io_initialize(bdev_io, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_WRITE);
+ lba += g_strip_size;
+ memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
+ g_io_output_index = 0;
+ raid_bdev_submit_request(ch, bdev_io);
+ verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
+ g_child_io_status_flag);
+ bdev_io_cleanup(bdev_io);
+ free(bdev_io);
+ }
+ free_test_req(&req);
+
+ raid_bdev_destroy_cb(pbdev, ch_ctx);
+ CU_ASSERT(ch_ctx->base_channel == NULL);
+ free(ch);
+ destroy_req.name = strdup("raid1");
+ rpc_req = &destroy_req;
+ rpc_req_size = sizeof(destroy_req);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 0;
+ spdk_rpc_destroy_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ raid_bdev_exit();
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+static void
+test_read_io(void)
+{
+ struct rpc_construct_raid_bdev req;
+ struct rpc_destroy_raid_bdev destroy_req;
+ struct raid_bdev *pbdev;
+ struct spdk_io_channel *ch;
+ struct raid_bdev_io_channel *ch_ctx;
+ uint32_t i;
+ struct spdk_bdev_io *bdev_io;
+ uint32_t count;
+ uint64_t io_len;
+ uint64_t lba;
+
+ set_globals();
+ create_test_req(&req, "raid1", 0, true);
+ rpc_req = &req;
+ rpc_req_size = sizeof(req);
+ CU_ASSERT(raid_bdev_init() == 0);
+ verify_raid_config_present(req.name, false);
+ verify_raid_bdev_present(req.name, false);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 1;
+ spdk_rpc_construct_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config(&req, true);
+ verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
+ TAILQ_FOREACH(pbdev, &g_spdk_raid_bdev_list, global_link) {
+ if (strcmp(pbdev->bdev.name, req.name) == 0) {
+ break;
+ }
+ }
+ SPDK_CU_ASSERT_FATAL(pbdev != NULL);
+ ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct raid_bdev_io_channel));
+ SPDK_CU_ASSERT_FATAL(ch != NULL);
+ ch_ctx = spdk_io_channel_get_ctx(ch);
+ SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
+
+ CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) == 0);
+ for (i = 0; i < req.base_bdevs.num_base_bdevs; i++) {
+ CU_ASSERT(ch_ctx->base_channel && ch_ctx->base_channel[i] == (void *)0x1);
+ }
+ free_test_req(&req);
+
+ lba = 0;
+ for (count = 0; count < g_max_qd; count++) {
+ bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+ io_len = (rand() % g_strip_size) + 1;
+ bdev_io_initialize(bdev_io, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_READ);
+ lba += g_strip_size;
+ memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
+ g_io_output_index = 0;
+ raid_bdev_submit_request(ch, bdev_io);
+ verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
+ g_child_io_status_flag);
+ bdev_io_cleanup(bdev_io);
+ free(bdev_io);
+ }
+
+ raid_bdev_destroy_cb(pbdev, ch_ctx);
+ CU_ASSERT(ch_ctx->base_channel == NULL);
+ free(ch);
+ destroy_req.name = strdup("raid1");
+ rpc_req = &destroy_req;
+ rpc_req_size = sizeof(destroy_req);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 0;
+ spdk_rpc_destroy_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ raid_bdev_exit();
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+/* Test IO failures */
+static void
+test_io_failure(void)
+{
+ struct rpc_construct_raid_bdev req;
+ struct rpc_destroy_raid_bdev destroy_req;
+ struct raid_bdev *pbdev;
+ struct spdk_io_channel *ch;
+ struct raid_bdev_io_channel *ch_ctx;
+ uint32_t i;
+ struct spdk_bdev_io *bdev_io;
+ uint32_t count;
+ uint64_t io_len;
+ uint64_t lba;
+
+ set_globals();
+ create_test_req(&req, "raid1", 0, true);
+ rpc_req = &req;
+ rpc_req_size = sizeof(req);
+ CU_ASSERT(raid_bdev_init() == 0);
+ verify_raid_config_present(req.name, false);
+ verify_raid_bdev_present(req.name, false);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 1;
+ spdk_rpc_construct_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config(&req, true);
+ verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
+ TAILQ_FOREACH(pbdev, &g_spdk_raid_bdev_list, global_link) {
+ if (strcmp(pbdev->bdev.name, req.name) == 0) {
+ break;
+ }
+ }
+ SPDK_CU_ASSERT_FATAL(pbdev != NULL);
+ ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct raid_bdev_io_channel));
+ SPDK_CU_ASSERT_FATAL(ch != NULL);
+ ch_ctx = spdk_io_channel_get_ctx(ch);
+ SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
+
+ CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) == 0);
+ for (i = 0; i < req.base_bdevs.num_base_bdevs; i++) {
+ CU_ASSERT(ch_ctx->base_channel && ch_ctx->base_channel[i] == (void *)0x1);
+ }
+ free_test_req(&req);
+
+ lba = 0;
+ for (count = 0; count < 1; count++) {
+ bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+ io_len = (rand() % g_strip_size) + 1;
+ bdev_io_initialize(bdev_io, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_INVALID);
+ lba += g_strip_size;
+ memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
+ g_io_output_index = 0;
+ raid_bdev_submit_request(ch, bdev_io);
+ verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
+ INVALID_IO_SUBMIT);
+ bdev_io_cleanup(bdev_io);
+ free(bdev_io);
+ }
+
+ lba = 0;
+ g_child_io_status_flag = false;
+ for (count = 0; count < 1; count++) {
+ bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+ io_len = (rand() % g_strip_size) + 1;
+ bdev_io_initialize(bdev_io, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_WRITE);
+ lba += g_strip_size;
+ memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
+ g_io_output_index = 0;
+ raid_bdev_submit_request(ch, bdev_io);
+ verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
+ g_child_io_status_flag);
+ bdev_io_cleanup(bdev_io);
+ free(bdev_io);
+ }
+
+ raid_bdev_destroy_cb(pbdev, ch_ctx);
+ CU_ASSERT(ch_ctx->base_channel == NULL);
+ free(ch);
+ destroy_req.name = strdup("raid1");
+ rpc_req = &destroy_req;
+ rpc_req_size = sizeof(destroy_req);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 0;
+ spdk_rpc_destroy_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ raid_bdev_exit();
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+/* Test waitq logic */
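+/* Every submission returns -ENOMEM, so all g_max_qd IOs must land on the
+ * waitq; clearing the error and draining the queue must resubmit all of them.
+ */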
+static void
+test_io_waitq(void)
+{
+ struct rpc_construct_raid_bdev req;
+ struct rpc_destroy_raid_bdev destroy_req;
+ struct raid_bdev *pbdev;
+ struct spdk_io_channel *ch;
+ struct raid_bdev_io_channel *ch_ctx;
+ uint32_t i;
+ struct spdk_bdev_io *bdev_io;
+ struct spdk_bdev_io *bdev_io_next;
+ uint32_t count;
+ uint64_t io_len;
+ uint64_t lba;
+ TAILQ_HEAD(, spdk_bdev_io) head_io;
+
+ set_globals();
+ create_test_req(&req, "raid1", 0, true);
+ rpc_req = &req;
+ rpc_req_size = sizeof(req);
+ CU_ASSERT(raid_bdev_init() == 0);
+ verify_raid_config_present(req.name, false);
+ verify_raid_bdev_present(req.name, false);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 1;
+ spdk_rpc_construct_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config(&req, true);
+ verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
+ TAILQ_FOREACH(pbdev, &g_spdk_raid_bdev_list, global_link) {
+ if (strcmp(pbdev->bdev.name, req.name) == 0) {
+ break;
+ }
+ }
+ SPDK_CU_ASSERT_FATAL(pbdev != NULL);
+ ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct raid_bdev_io_channel));
+ SPDK_CU_ASSERT_FATAL(ch != NULL);
+ ch_ctx = spdk_io_channel_get_ctx(ch);
+ SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
+
+ CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) == 0);
+ SPDK_CU_ASSERT_FATAL(ch_ctx->base_channel != NULL);
+ for (i = 0; i < req.base_bdevs.num_base_bdevs; i++) {
+ CU_ASSERT(ch_ctx->base_channel[i] == (void *)0x1);
+ }
+ free_test_req(&req);
+
+ lba = 0;
+ TAILQ_INIT(&head_io);
+ for (count = 0; count < g_max_qd; count++) {
+ bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+ TAILQ_INSERT_TAIL(&head_io, bdev_io, module_link);
+ io_len = (rand() % g_strip_size) + 1;
+ bdev_io_initialize(bdev_io, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_WRITE);
+ g_bdev_io_submit_status = -ENOMEM;
+ lba += g_strip_size;
+ raid_bdev_submit_request(ch, bdev_io);
+ }
+
+ g_ignore_io_output = 1;
+
+ count = get_num_elts_in_waitq();
+ CU_ASSERT(count == g_max_qd);
+ g_bdev_io_submit_status = 0;
+ process_io_waitq();
+ CU_ASSERT(TAILQ_EMPTY(&g_io_waitq));
+
+ TAILQ_FOREACH_SAFE(bdev_io, &head_io, module_link, bdev_io_next) {
+ bdev_io_cleanup(bdev_io);
+ free(bdev_io);
+ }
+
+ raid_bdev_destroy_cb(pbdev, ch_ctx);
+ CU_ASSERT(ch_ctx->base_channel == NULL);
+ g_ignore_io_output = 0;
+ free(ch);
+ destroy_req.name = strdup("raid1");
+ rpc_req = &destroy_req;
+ rpc_req_size = sizeof(destroy_req);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 0;
+ spdk_rpc_destroy_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ raid_bdev_exit();
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+/* Create multiple raids, destroy raids without IO, get_raids related tests */
+static void
+test_multi_raid_no_io(void)
+{
+ struct rpc_construct_raid_bdev *construct_req;
+ struct rpc_destroy_raid_bdev destroy_req;
+ struct rpc_get_raid_bdevs get_raids_req;
+ uint32_t i;
+ char name[16];
+ uint32_t count;
+ uint32_t bbdev_idx = 0;
+
+ set_globals();
+ construct_req = calloc(MAX_RAIDS, sizeof(struct rpc_construct_raid_bdev));
+ SPDK_CU_ASSERT_FATAL(construct_req != NULL);
+ CU_ASSERT(raid_bdev_init() == 0);
+ for (i = 0; i < g_max_raids; i++) {
+ count = snprintf(name, 16, "%s%u", "raid", i);
+ name[count] = '\0';
+ create_test_req(&construct_req[i], name, bbdev_idx, true);
+ verify_raid_config_present(name, false);
+ verify_raid_bdev_present(name, false);
+ bbdev_idx += g_max_base_drives;
+ rpc_req = &construct_req[i];
+ rpc_req_size = sizeof(construct_req[0]);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 1;
+ spdk_rpc_construct_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config(&construct_req[i], true);
+ verify_raid_bdev(&construct_req[i], true, RAID_BDEV_STATE_ONLINE);
+ }
+
+ get_raids_req.category = strdup("all");
+ rpc_req = &get_raids_req;
+ rpc_req_size = sizeof(get_raids_req);
+ g_rpc_err = 0;
+ g_test_multi_raids = 1;
+ g_json_decode_obj_construct = 0;
+ spdk_rpc_get_raid_bdevs(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_get_raids(construct_req, g_max_raids, g_get_raids_output, g_get_raids_count);
+ for (i = 0; i < g_get_raids_count; i++) {
+ free(g_get_raids_output[i]);
+ }
+ g_get_raids_count = 0;
+
+ get_raids_req.category = strdup("online");
+ rpc_req = &get_raids_req;
+ rpc_req_size = sizeof(get_raids_req);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 0;
+ spdk_rpc_get_raid_bdevs(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_get_raids(construct_req, g_max_raids, g_get_raids_output, g_get_raids_count);
+ for (i = 0; i < g_get_raids_count; i++) {
+ free(g_get_raids_output[i]);
+ }
+ g_get_raids_count = 0;
+
+ get_raids_req.category = strdup("configuring");
+ rpc_req = &get_raids_req;
+ rpc_req_size = sizeof(get_raids_req);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 0;
+ spdk_rpc_get_raid_bdevs(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ CU_ASSERT(g_get_raids_count == 0);
+
+ get_raids_req.category = strdup("offline");
+ rpc_req = &get_raids_req;
+ rpc_req_size = sizeof(get_raids_req);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 0;
+ spdk_rpc_get_raid_bdevs(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ CU_ASSERT(g_get_raids_count == 0);
+
+ get_raids_req.category = strdup("invalid_category");
+ rpc_req = &get_raids_req;
+ rpc_req_size = sizeof(get_raids_req);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 0;
+ spdk_rpc_get_raid_bdevs(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 1);
+ CU_ASSERT(g_get_raids_count == 0);
+
+ get_raids_req.category = strdup("all");
+ rpc_req = &get_raids_req;
+ rpc_req_size = sizeof(get_raids_req);
+ g_rpc_err = 0;
+ g_json_decode_obj_err = 1;
+ g_json_decode_obj_construct = 0;
+ spdk_rpc_get_raid_bdevs(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 1);
+ g_json_decode_obj_err = 0;
+ free(get_raids_req.category);
+ CU_ASSERT(g_get_raids_count == 0);
+
+ get_raids_req.category = strdup("all");
+ rpc_req = &get_raids_req;
+ rpc_req_size = sizeof(get_raids_req);
+ g_rpc_err = 0;
+ g_json_beg_res_ret_err = 1;
+ g_json_decode_obj_construct = 0;
+ spdk_rpc_get_raid_bdevs(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ g_json_beg_res_ret_err = 0;
+ CU_ASSERT(g_get_raids_count == 0);
+
+ for (i = 0; i < g_max_raids; i++) {
+ SPDK_CU_ASSERT_FATAL(construct_req[i].name != NULL);
+ destroy_req.name = strdup(construct_req[i].name);
+ count = snprintf(name, 16, "%s", destroy_req.name);
+ name[count] = '\0';
+ rpc_req = &destroy_req;
+ rpc_req_size = sizeof(destroy_req);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 0;
+ spdk_rpc_destroy_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config_present(name, false);
+ verify_raid_bdev_present(name, false);
+ }
+ g_test_multi_raids = 0;
+ raid_bdev_exit();
+ for (i = 0; i < g_max_raids; i++) {
+ free_test_req(&construct_req[i]);
+ }
+ free(construct_req);
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+/* Create multiple raids, fire IOs randomly on various raids */
+static void
+test_multi_raid_with_io(void)
+{
+ struct rpc_construct_raid_bdev *construct_req;
+ struct rpc_destroy_raid_bdev destroy_req;
+ uint32_t i, j;
+ char name[16];
+ uint32_t count;
+ uint32_t bbdev_idx = 0;
+ struct raid_bdev *pbdev;
+ struct spdk_io_channel *ch;
+ struct raid_bdev_io_channel *ch_ctx;
+ struct spdk_bdev_io *bdev_io;
+ uint64_t io_len;
+ uint64_t lba;
+ struct spdk_io_channel *ch_random;
+ struct raid_bdev_io_channel *ch_ctx_random;
+ int16_t iotype;
+ uint32_t raid_random;
+
+ set_globals();
+ construct_req = calloc(g_max_raids, sizeof(struct rpc_construct_raid_bdev));
+ SPDK_CU_ASSERT_FATAL(construct_req != NULL);
+ CU_ASSERT(raid_bdev_init() == 0);
+ ch = calloc(g_max_raids, sizeof(struct spdk_io_channel) + sizeof(struct raid_bdev_io_channel));
+ SPDK_CU_ASSERT_FATAL(ch != NULL);
+ for (i = 0; i < g_max_raids; i++) {
+ count = snprintf(name, 16, "%s%u", "raid", i);
+ name[count] = '\0';
+ create_test_req(&construct_req[i], name, bbdev_idx, true);
+ verify_raid_config_present(name, false);
+ verify_raid_bdev_present(name, false);
+ bbdev_idx += g_max_base_drives;
+ rpc_req = &construct_req[i];
+ rpc_req_size = sizeof(construct_req[0]);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 1;
+ spdk_rpc_construct_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config(&construct_req[i], true);
+ verify_raid_bdev(&construct_req[i], true, RAID_BDEV_STATE_ONLINE);
+ TAILQ_FOREACH(pbdev, &g_spdk_raid_bdev_list, global_link) {
+ if (strcmp(pbdev->bdev.name, construct_req[i].name) == 0) {
+ break;
+ }
+ }
+ SPDK_CU_ASSERT_FATAL(pbdev != NULL);
+ ch_ctx = spdk_io_channel_get_ctx(&ch[i]);
+ SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
+ CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) == 0);
+ CU_ASSERT(ch_ctx->base_channel != NULL);
+ for (j = 0; j < construct_req[i].base_bdevs.num_base_bdevs; j++) {
+ CU_ASSERT(ch_ctx->base_channel[j] == (void *)0x1);
+ }
+ }
+
+ lba = 0;
+ for (count = 0; count < g_max_qd; count++) {
+ bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+ io_len = (rand() % g_strip_size) + 1;
+ iotype = (rand() % 2) ? SPDK_BDEV_IO_TYPE_WRITE : SPDK_BDEV_IO_TYPE_READ;
+ memset(g_io_output, 0, ((g_max_io_size / g_strip_size) + 1) * sizeof(struct io_output));
+ g_io_output_index = 0;
+ raid_random = rand() % g_max_raids;
+ ch_random = &ch[raid_random];
+ ch_ctx_random = spdk_io_channel_get_ctx(ch_random);
+ TAILQ_FOREACH(pbdev, &g_spdk_raid_bdev_list, global_link) {
+ if (strcmp(pbdev->bdev.name, construct_req[raid_random].name) == 0) {
+ break;
+ }
+ }
+ SPDK_CU_ASSERT_FATAL(pbdev != NULL);
+ bdev_io_initialize(bdev_io, &pbdev->bdev, lba, io_len, iotype);
+ lba += g_strip_size;
+ raid_bdev_submit_request(ch_random, bdev_io);
+ verify_io(bdev_io, g_max_base_drives, ch_ctx_random, pbdev,
+ g_child_io_status_flag);
+ bdev_io_cleanup(bdev_io);
+ free(bdev_io);
+ }
+
+ for (i = 0; i < g_max_raids; i++) {
+ TAILQ_FOREACH(pbdev, &g_spdk_raid_bdev_list, global_link) {
+ if (strcmp(pbdev->bdev.name, construct_req[i].name) == 0) {
+ break;
+ }
+ }
+ SPDK_CU_ASSERT_FATAL(pbdev != NULL);
+ ch_ctx = spdk_io_channel_get_ctx(&ch[i]);
+ SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
+ raid_bdev_destroy_cb(pbdev, ch_ctx);
+ CU_ASSERT(ch_ctx->base_channel == NULL);
+ destroy_req.name = strdup(construct_req[i].name);
+ count = snprintf(name, 16, "%s", destroy_req.name);
+ name[count] = '\0';
+ rpc_req = &destroy_req;
+ rpc_req_size = sizeof(destroy_req);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 0;
+ spdk_rpc_destroy_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config_present(name, false);
+ verify_raid_bdev_present(name, false);
+ }
+ raid_bdev_exit();
+ for (i = 0; i < g_max_raids; i++) {
+ free_test_req(&construct_req[i]);
+ }
+ free(construct_req);
+ free(ch);
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+static void
+test_io_type_supported(void)
+{
+ CU_ASSERT(raid_bdev_io_type_supported(NULL, SPDK_BDEV_IO_TYPE_READ) == true);
+ CU_ASSERT(raid_bdev_io_type_supported(NULL, SPDK_BDEV_IO_TYPE_WRITE) == true);
+ CU_ASSERT(raid_bdev_io_type_supported(NULL, SPDK_BDEV_IO_TYPE_FLUSH) == true);
+ CU_ASSERT(raid_bdev_io_type_supported(NULL, SPDK_BDEV_IO_TYPE_INVALID) == false);
+}
+
+static void
+test_create_raid_from_config(void)
+{
+ struct rpc_construct_raid_bdev req;
+ struct spdk_bdev *bdev;
+ struct rpc_destroy_raid_bdev destroy_req;
+ bool can_claim;
+ struct raid_bdev_config *raid_cfg;
+ uint32_t base_bdev_slot;
+
+ set_globals();
+ create_test_req(&req, "raid1", 0, true);
+ rpc_req = &req;
+ rpc_req_size = sizeof(req);
+ g_config_level_create = 1;
+ CU_ASSERT(raid_bdev_init() == 0);
+ g_config_level_create = 0;
+
+ verify_raid_config_present("raid1", true);
+ verify_raid_bdev_present("raid1", true);
+
+ TAILQ_FOREACH(bdev, &g_bdev_list, internal.link) {
+ raid_bdev_examine(bdev);
+ }
+
+ can_claim = raid_bdev_can_claim_bdev("Invalid", &raid_cfg, &base_bdev_slot);
+ CU_ASSERT(can_claim == false);
+
+ verify_raid_config(&req, true);
+ verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
+
+ destroy_req.name = strdup("raid1");
+ rpc_req = &destroy_req;
+ rpc_req_size = sizeof(destroy_req);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 0;
+ spdk_rpc_destroy_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ raid_bdev_exit();
+ free_test_req(&req);
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+static void
+test_create_raid_from_config_invalid_params(void)
+{
+ struct rpc_construct_raid_bdev req;
+ uint8_t count;
+
+ set_globals();
+ rpc_req = &req;
+ rpc_req_size = sizeof(req);
+ g_config_level_create = 1;
+
+ create_test_req(&req, "raid1", 0, true);
+ free(req.name);
+ req.name = NULL;
+ CU_ASSERT(raid_bdev_init() != 0);
+ free_test_req(&req);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ create_test_req(&req, "raid1", 0, false);
+ req.strip_size = 1234;
+ CU_ASSERT(raid_bdev_init() != 0);
+ free_test_req(&req);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ create_test_req(&req, "raid1", 0, false);
+ req.raid_level = 1;
+ CU_ASSERT(raid_bdev_init() != 0);
+ free_test_req(&req);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ create_test_req(&req, "raid1", 0, false);
+ req.base_bdevs.num_base_bdevs++;
+ CU_ASSERT(raid_bdev_init() != 0);
+ req.base_bdevs.num_base_bdevs--;
+ free_test_req(&req);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ create_test_req(&req, "raid1", 0, false);
+ req.base_bdevs.num_base_bdevs--;
+ CU_ASSERT(raid_bdev_init() != 0);
+ req.base_bdevs.num_base_bdevs++;
+ free_test_req(&req);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ if (g_max_base_drives > 1) {
+ create_test_req(&req, "raid1", 0, false);
+ count = snprintf(req.base_bdevs.base_bdevs[g_max_base_drives - 1], 15, "%s", "Nvme0n1");
+ req.base_bdevs.base_bdevs[g_max_base_drives - 1][count] = '\0';
+ CU_ASSERT(raid_bdev_init() != 0);
+ free_test_req(&req);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+ }
+
+ raid_bdev_exit();
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+static void
+test_raid_json_dump_info(void)
+{
+ struct rpc_construct_raid_bdev req;
+ struct rpc_destroy_raid_bdev destroy_req;
+ struct raid_bdev *pbdev;
+
+ set_globals();
+ create_test_req(&req, "raid1", 0, true);
+ rpc_req = &req;
+ rpc_req_size = sizeof(req);
+ CU_ASSERT(raid_bdev_init() == 0);
+
+ verify_raid_config_present(req.name, false);
+ verify_raid_bdev_present(req.name, false);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 1;
+ spdk_rpc_construct_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
+
+ TAILQ_FOREACH(pbdev, &g_spdk_raid_bdev_list, global_link) {
+ if (strcmp(pbdev->bdev.name, req.name) == 0) {
+ break;
+ }
+ }
+ SPDK_CU_ASSERT_FATAL(pbdev != NULL);
+
+ CU_ASSERT(raid_bdev_dump_info_json(pbdev, NULL) == 0);
+
+ free_test_req(&req);
+
+ destroy_req.name = strdup("raid1");
+ rpc_req = &destroy_req;
+ rpc_req_size = sizeof(destroy_req);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 0;
+ spdk_rpc_destroy_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ raid_bdev_exit();
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+static void
+test_context_size(void)
+{
+ CU_ASSERT(raid_bdev_get_ctx_size() == sizeof(struct raid_bdev_io));
+}
+
+static void
+test_asym_base_drives_blockcnt(void)
+{
+ struct rpc_construct_raid_bdev construct_req;
+ struct rpc_destroy_raid_bdev destroy_req;
+ struct spdk_bdev *bbdev;
+ uint32_t i;
+
+ set_globals();
+ create_test_req(&construct_req, "raid1", 0, true);
+ rpc_req = &construct_req;
+ rpc_req_size = sizeof(construct_req);
+ CU_ASSERT(raid_bdev_init() == 0);
+ verify_raid_config_present(construct_req.name, false);
+ verify_raid_bdev_present(construct_req.name, false);
+ g_rpc_err = 0;
+ for (i = 0; i < construct_req.base_bdevs.num_base_bdevs; i++) {
+ bbdev = spdk_bdev_get_by_name(construct_req.base_bdevs.base_bdevs[i]);
+ SPDK_CU_ASSERT_FATAL(bbdev != NULL);
+ bbdev->blockcnt = rand() + 1;
+ }
+ g_json_decode_obj_construct = 1;
+ spdk_rpc_construct_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config(&construct_req, true);
+ verify_raid_bdev(&construct_req, true, RAID_BDEV_STATE_ONLINE);
+ free_test_req(&construct_req);
+
+ destroy_req.name = strdup("raid1");
+ rpc_req = &destroy_req;
+ rpc_req_size = sizeof(destroy_req);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 0;
+ spdk_rpc_destroy_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ raid_bdev_exit();
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("raid", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "test_construct_raid", test_construct_raid) == NULL ||
+ CU_add_test(suite, "test_destroy_raid", test_destroy_raid) == NULL ||
+ CU_add_test(suite, "test_construct_raid_invalid_args", test_construct_raid_invalid_args) == NULL ||
+ CU_add_test(suite, "test_destroy_raid_invalid_args", test_destroy_raid_invalid_args) == NULL ||
+ CU_add_test(suite, "test_io_channel", test_io_channel) == NULL ||
+ CU_add_test(suite, "test_write_io", test_write_io) == NULL ||
+ CU_add_test(suite, "test_read_io", test_read_io) == NULL ||
+ CU_add_test(suite, "test_io_failure", test_io_failure) == NULL ||
+ CU_add_test(suite, "test_io_waitq", test_io_waitq) == NULL ||
+ CU_add_test(suite, "test_multi_raid_no_io", test_multi_raid_no_io) == NULL ||
+ CU_add_test(suite, "test_multi_raid_with_io", test_multi_raid_with_io) == NULL ||
+ CU_add_test(suite, "test_io_type_supported", test_io_type_supported) == NULL ||
+ CU_add_test(suite, "test_create_raid_from_config", test_create_raid_from_config) == NULL ||
+ CU_add_test(suite, "test_create_raid_from_config_invalid_params",
+ test_create_raid_from_config_invalid_params) == NULL ||
+ CU_add_test(suite, "test_raid_json_dump_info", test_raid_json_dump_info) == NULL ||
+ CU_add_test(suite, "test_context_size", test_context_size) == NULL ||
+ CU_add_test(suite, "test_asym_base_drives_blockcnt", test_asym_base_drives_blockcnt) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ set_test_opts();
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/bdev/crypto.c/.gitignore b/src/spdk/test/unit/lib/bdev/crypto.c/.gitignore
new file mode 100644
index 00000000..b2777562
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/crypto.c/.gitignore
@@ -0,0 +1 @@
+crypto_ut
diff --git a/src/spdk/test/unit/lib/bdev/crypto.c/Makefile b/src/spdk/test/unit/lib/bdev/crypto.c/Makefile
new file mode 100644
index 00000000..3241464b
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/crypto.c/Makefile
@@ -0,0 +1,41 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+
+TEST_FILE = crypto_ut.c
+CFLAGS += $(ENV_CFLAGS)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/bdev/crypto.c/crypto_ut.c b/src/spdk/test/unit/lib/bdev/crypto.c/crypto_ut.c
new file mode 100644
index 00000000..f01aba19
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/crypto.c/crypto_ut.c
@@ -0,0 +1,908 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "common/lib/test_env.c"
+#include "spdk_internal/mock.h"
+#include "unit/lib/json_mock.c"
+
+/* These rte_ headers are local copies of the DPDK headers, modified so that we can
+ * mock some functions defined in them that can't be mocked with our mock library.
+ */
+#include "rte_crypto.h"
+#include "rte_cryptodev.h"
+DEFINE_STUB_V(rte_crypto_op_free, (struct rte_crypto_op *op));
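+/* Pull in the source file under test so that it is compiled directly into this
+ * binary, giving the tests access to its static functions and data.
+ */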
+#include "bdev/crypto/vbdev_crypto.c"
+
+/* SPDK stubs */
+DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *,
+ (struct spdk_conf *cp, const char *name), NULL);
+DEFINE_STUB(spdk_conf_section_get_nval, char *,
+ (struct spdk_conf_section *sp, const char *key, int idx), NULL);
+DEFINE_STUB(spdk_conf_section_get_nmval, char *,
+ (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL);
+
+DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));
+DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *g_bdev_io));
+DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev,
+ enum spdk_bdev_io_type io_type), 0);
+DEFINE_STUB_V(spdk_bdev_module_release_bdev, (struct spdk_bdev *bdev));
+DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
+DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), 0);
+DEFINE_STUB(spdk_env_get_current_core, uint32_t, (void), 0);
+DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc), 0);
+DEFINE_STUB_V(spdk_bdev_unregister, (struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn,
+ void *cb_arg));
+DEFINE_STUB(spdk_bdev_open, int, (struct spdk_bdev *bdev, bool write,
+ spdk_bdev_remove_cb_t remove_cb,
+ void *remove_ctx, struct spdk_bdev_desc **_desc), 0);
+DEFINE_STUB(spdk_bdev_module_claim_bdev, int, (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
+ struct spdk_bdev_module *module), 0);
+DEFINE_STUB_V(spdk_bdev_module_examine_done, (struct spdk_bdev_module *module));
+DEFINE_STUB(spdk_vbdev_register, int, (struct spdk_bdev *vbdev, struct spdk_bdev **base_bdevs,
+ int base_bdev_count), 0);
+DEFINE_STUB(spdk_bdev_get_by_name, struct spdk_bdev *, (const char *bdev_name), NULL);
+DEFINE_STUB(spdk_env_get_socket_id, uint32_t, (uint32_t core), 0);
+
+/* DPDK stubs */
+DEFINE_STUB(rte_cryptodev_count, uint8_t, (void), 0);
+DEFINE_STUB(rte_eal_get_configuration, struct rte_config *, (void), NULL);
+DEFINE_STUB_V(rte_mempool_free, (struct rte_mempool *mp));
+DEFINE_STUB(rte_socket_id, unsigned, (void), 0);
+DEFINE_STUB(rte_crypto_op_pool_create, struct rte_mempool *,
+ (const char *name, enum rte_crypto_op_type type, unsigned nb_elts,
+ unsigned cache_size, uint16_t priv_size, int socket_id), (struct rte_mempool *)1);
+DEFINE_STUB(rte_cryptodev_device_count_by_driver, uint8_t, (uint8_t driver_id), 0);
+DEFINE_STUB(rte_cryptodev_socket_id, int, (uint8_t dev_id), 0);
+DEFINE_STUB(rte_cryptodev_configure, int, (uint8_t dev_id, struct rte_cryptodev_config *config), 0);
+DEFINE_STUB(rte_cryptodev_queue_pair_setup, int, (uint8_t dev_id, uint16_t queue_pair_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id, struct rte_mempool *session_pool), 0);
+DEFINE_STUB(rte_cryptodev_start, int, (uint8_t dev_id), 0);
+DEFINE_STUB_V(rte_cryptodev_stop, (uint8_t dev_id));
+DEFINE_STUB(rte_cryptodev_sym_session_create, struct rte_cryptodev_sym_session *,
+ (struct rte_mempool *mempool), (struct rte_cryptodev_sym_session *)1);
+DEFINE_STUB(rte_cryptodev_sym_session_clear, int, (uint8_t dev_id,
+ struct rte_cryptodev_sym_session *sess), 0);
+DEFINE_STUB(rte_cryptodev_sym_session_free, int, (struct rte_cryptodev_sym_session *sess), 0);
+DEFINE_STUB(rte_cryptodev_sym_session_init, int, (uint8_t dev_id,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_crypto_sym_xform *xforms, struct rte_mempool *mempool), 0);
+DEFINE_STUB(rte_vdev_init, int, (const char *name, const char *args), 0);
+void __attribute__((noreturn)) __rte_panic(const char *funcname, const char *format, ...)
+{
+ abort();
+}
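+/* Definitions that exist only to satisfy link-time references from the DPDK
+ * headers included above.
+ */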
+struct rte_mempool_ops_table rte_mempool_ops_table;
+struct rte_cryptodev *rte_cryptodevs;
+__thread unsigned per_lcore__lcore_id = 0;
+
+/* Global variables and setup/cleanup functions shared by all test functions. */
+struct spdk_bdev_io *g_bdev_io;
+struct crypto_bdev_io *g_io_ctx;
+struct crypto_io_channel *g_crypto_ch;
+struct spdk_io_channel *g_io_ch;
+struct vbdev_dev g_device;
+struct vbdev_crypto g_crypto_bdev;
+struct rte_config *g_test_config;
+struct device_qp g_dev_qp;
+
+#define MAX_TEST_BLOCKS 8192
+struct rte_crypto_op *g_test_crypto_ops[MAX_TEST_BLOCKS];
+struct rte_crypto_op *g_test_dequeued_ops[MAX_TEST_BLOCKS];
+struct rte_crypto_op *g_test_dev_full_ops[MAX_TEST_BLOCKS];
+
+/* These globals are declared extern in our local rte_ header files so that tests
+ * can control the behavior of specific mocked functions.
+ */
+uint16_t g_dequeue_mock;
+uint16_t g_enqueue_mock;
+unsigned ut_rte_crypto_op_bulk_alloc;
+int ut_rte_crypto_op_attach_sym_session = 0;
+
+int ut_rte_cryptodev_info_get = 0;
+bool ut_rte_cryptodev_info_get_mocked = false;
+void
+rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
+{
+ dev_info->max_nb_queue_pairs = ut_rte_cryptodev_info_get;
+}
+
+unsigned int
+rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
+{
+ return (unsigned int)dev_id;
+}
+
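+/* Mocked spdk_bdev_io_get_buf(): invoke the callback immediately with the global
+ * channel and bdev_io so the code under test proceeds synchronously.
+ */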
+void
+spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
+{
+ cb(g_io_ch, g_bdev_io);
+}
+
+/* Mock these functions to invoke the completion callback and then return the value the test requires. */
+int ut_spdk_bdev_readv_blocks = 0;
+bool ut_spdk_bdev_readv_blocks_mocked = false;
+int
+spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *iov, int iovcnt,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ cb(g_bdev_io, !ut_spdk_bdev_readv_blocks, cb_arg);
+ return ut_spdk_bdev_readv_blocks;
+}
+
+int ut_spdk_bdev_writev_blocks = 0;
+bool ut_spdk_bdev_writev_blocks_mocked = false;
+int
+spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *iov, int iovcnt,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ cb(g_bdev_io, !ut_spdk_bdev_writev_blocks, cb_arg);
+ return ut_spdk_bdev_writev_blocks;
+}
+
+int ut_spdk_bdev_unmap_blocks = 0;
+bool ut_spdk_bdev_unmap_blocks_mocked = false;
+int
+spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ cb(g_bdev_io, !ut_spdk_bdev_unmap_blocks, cb_arg);
+ return ut_spdk_bdev_unmap_blocks;
+}
+
+int ut_spdk_bdev_flush_blocks = 0;
+bool ut_spdk_bdev_flush_blocks_mocked = false;
+int
+spdk_bdev_flush_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
+ void *cb_arg)
+{
+ cb(g_bdev_io, !ut_spdk_bdev_flush_blocks, cb_arg);
+ return ut_spdk_bdev_flush_blocks;
+}
+
+int ut_spdk_bdev_reset = 0;
+bool ut_spdk_bdev_reset_mocked = false;
+int
+spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ cb(g_bdev_io, !ut_spdk_bdev_reset, cb_arg);
+ return ut_spdk_bdev_reset;
+}
+
+bool g_completion_called = false;
+void
+spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
+{
+ bdev_io->internal.status = status;
+ g_completion_called = true;
+}
+
+/* Used to test the device-full condition. */
+static inline uint16_t
+rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ int i;
+
+ CU_ASSERT(nb_ops > 0);
+
+ for (i = 0; i < nb_ops; i++) {
+		/* Use this (until now empty) array of pointers to store the
+		 * enqueued operations for assertion in the dev_full test.
+		 */
+ g_test_dev_full_ops[i] = *ops++;
+ }
+
+ return g_enqueue_mock;
+}
+
+/* This is pretty ugly, but in order to complete an IO via the poller in the
+ * submit path, the first call to this function needs to return the dequeued
+ * value and also decrement it. The subsequent call needs to return 0 to
+ * indicate to the caller that there are no more IOs to drain.
+ */
+int g_test_overflow = 0;
+static inline uint16_t
+rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ CU_ASSERT(nb_ops > 0);
+
+	/* A crypto device can be full on enqueue; in that case the driver is designed
+	 * to drain the device by calling the poller until it's empty, and then to
+	 * submit the remaining crypto ops.
+	 */
+ if (g_test_overflow) {
+ if (g_dequeue_mock == 0) {
+ return 0;
+ }
+ *ops = g_test_crypto_ops[g_enqueue_mock];
+ (*ops)->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ g_dequeue_mock -= 1;
+ }
+ return (g_dequeue_mock + 1);
+}
+
+/* Instead of allocating real memory, assign the allocations to our
+ * test array for assertion in tests.
+ */
+static inline unsigned
+rte_crypto_op_bulk_alloc(struct rte_mempool *mempool,
+ enum rte_crypto_op_type type,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ int i;
+
+ for (i = 0; i < nb_ops; i++) {
+ *ops++ = g_test_crypto_ops[i];
+ }
+ return ut_rte_crypto_op_bulk_alloc;
+}
+
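+/* No-op: the ops handed out in these tests are plain callocs owned by the test,
+ * so there is nothing to return to a real pool.
+ */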
+static __rte_always_inline void
+rte_mempool_put_bulk(struct rte_mempool *mp, void *const *obj_table,
+ unsigned int n)
+{
+ return;
+}
+
+static inline void *rte_mempool_get_priv(struct rte_mempool *mp)
+{
+ return NULL;
+}
+
+
+static inline int
+rte_crypto_op_attach_sym_session(struct rte_crypto_op *op,
+ struct rte_cryptodev_sym_session *sess)
+{
+ return ut_rte_crypto_op_attach_sym_session;
+}
+
+/* Global setup shared by all tests; prepares the state they have in common. */
+static int
+test_setup(void)
+{
+ int i;
+
+ /* Prepare essential variables for test routines */
+ g_bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct crypto_bdev_io));
+ g_bdev_io->u.bdev.iovs = calloc(1, sizeof(struct iovec) * 128);
+ g_bdev_io->bdev = &g_crypto_bdev.crypto_bdev;
+ g_io_ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct crypto_io_channel));
+ g_crypto_ch = (struct crypto_io_channel *)((uint8_t *)g_io_ch + sizeof(struct spdk_io_channel));
+ g_io_ctx = (struct crypto_bdev_io *)g_bdev_io->driver_ctx;
+ memset(&g_device, 0, sizeof(struct vbdev_dev));
+ memset(&g_crypto_bdev, 0, sizeof(struct vbdev_crypto));
+ g_dev_qp.device = &g_device;
+ g_io_ctx->crypto_ch = g_crypto_ch;
+ g_io_ctx->crypto_bdev = &g_crypto_bdev;
+ g_crypto_ch->device_qp = &g_dev_qp;
+ g_test_config = calloc(1, sizeof(struct rte_config));
+ g_test_config->lcore_count = 1;
+
+ /* Allocate a real mbuf pool so we can test error paths */
+ g_mbuf_mp = spdk_mempool_create("mbuf_mp", NUM_MBUFS, sizeof(struct rte_mbuf),
+ SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
+ SPDK_ENV_SOCKET_ID_ANY);
+
+	/* Instead of allocating real rte mempools for these, it's easier, and provides
+	 * the same coverage, to simply calloc them here.
+	 */
+ for (i = 0; i < MAX_TEST_BLOCKS; i++) {
+ g_test_crypto_ops[i] = calloc(1, sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op));
+ g_test_dequeued_ops[i] = calloc(1, sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op));
+ }
+ return 0;
+}
+
+/* Global teardown for all tests */
+static int
+test_cleanup(void)
+{
+ int i;
+
+ free(g_test_config);
+ spdk_mempool_free(g_mbuf_mp);
+ for (i = 0; i < MAX_TEST_BLOCKS; i++) {
+ free(g_test_crypto_ops[i]);
+ free(g_test_dequeued_ops[i]);
+ }
+ free(g_bdev_io->u.bdev.iovs);
+ free(g_bdev_io);
+ free(g_io_ch);
+ return 0;
+}
+
+static void
+test_error_paths(void)
+{
+ /* Single element block size write, just to test error paths
+ * in vbdev_crypto_submit_request().
+ */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->u.bdev.iovcnt = 1;
+ g_bdev_io->u.bdev.num_blocks = 1;
+ g_bdev_io->u.bdev.iovs[0].iov_len = 512;
+ g_crypto_bdev.crypto_bdev.blocklen = 512;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
+ g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;
+
+	/* Test failure of spdk_mempool_get(). */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ MOCK_SET(spdk_mempool_get, NULL);
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+
+	/* Same thing, but switch to reads to test the error path in _crypto_complete_io(). */
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+	/* Now with spdk_bdev_readv_blocks() failing. */
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ MOCK_SET(spdk_bdev_readv_blocks, -1);
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ MOCK_SET(spdk_bdev_readv_blocks, 0);
+ MOCK_CLEAR(spdk_mempool_get);
+
+ /* test failure of rte_crypto_op_bulk_alloc() */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ ut_rte_crypto_op_bulk_alloc = 0;
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ ut_rte_crypto_op_bulk_alloc = 1;
+
+ /* test failure of rte_cryptodev_sym_session_create() */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ MOCK_SET(rte_cryptodev_sym_session_create, NULL);
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ MOCK_SET(rte_cryptodev_sym_session_create, (struct rte_cryptodev_sym_session *)1);
+
+ /* test failure of rte_cryptodev_sym_session_init() */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ MOCK_SET(rte_cryptodev_sym_session_init, -1);
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ MOCK_SET(rte_cryptodev_sym_session_init, 0);
+
+ /* test failure of rte_crypto_op_attach_sym_session() */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ ut_rte_crypto_op_attach_sym_session = -1;
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ ut_rte_crypto_op_attach_sym_session = 0;
+}
+
+static void
+test_simple_write(void)
+{
+ /* Single element block size write */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->u.bdev.iovcnt = 1;
+ g_bdev_io->u.bdev.num_blocks = 1;
+ g_bdev_io->u.bdev.offset_blocks = 0;
+ g_bdev_io->u.bdev.iovs[0].iov_len = 512;
+ g_bdev_io->u.bdev.iovs[0].iov_base = &test_simple_write;
+ g_crypto_bdev.crypto_bdev.blocklen = 512;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
+ g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;
+
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 1);
+ CU_ASSERT(g_io_ctx->crypto_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT);
+ CU_ASSERT(g_io_ctx->cry_iov.iov_len == 512);
+ CU_ASSERT(g_io_ctx->cry_iov.iov_base != NULL);
+ CU_ASSERT(g_io_ctx->cry_offset_blocks == 0);
+ CU_ASSERT(g_io_ctx->cry_num_blocks == 1);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == &test_simple_write);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == 512);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->userdata == g_bdev_io);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->buf_addr != NULL);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->data_len == 512);
+
+ spdk_dma_free(g_io_ctx->cry_iov.iov_base);
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_src);
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_dst);
+}
+
+static void
+test_simple_read(void)
+{
+ /* Single element block size read */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->u.bdev.iovcnt = 1;
+ g_bdev_io->u.bdev.num_blocks = 1;
+ g_bdev_io->u.bdev.iovs[0].iov_len = 512;
+ g_bdev_io->u.bdev.iovs[0].iov_base = &test_simple_read;
+ g_crypto_bdev.crypto_bdev.blocklen = 512;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
+ g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;
+
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 1);
+ CU_ASSERT(g_io_ctx->crypto_op == RTE_CRYPTO_CIPHER_OP_DECRYPT);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == &test_simple_read);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == 512);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->userdata == g_bdev_io);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst == NULL);
+
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_src);
+}
+
+static void
+test_large_rw(void)
+{
+ unsigned block_len = 512;
+ unsigned num_blocks = CRYPTO_MAX_IO / block_len;
+ unsigned io_len = block_len * num_blocks;
+ unsigned i;
+
+ /* Multi block size read, multi-element */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->u.bdev.iovcnt = 1;
+ g_bdev_io->u.bdev.num_blocks = num_blocks;
+ g_bdev_io->u.bdev.iovs[0].iov_len = io_len;
+ g_bdev_io->u.bdev.iovs[0].iov_base = &test_large_rw;
+ g_crypto_bdev.crypto_bdev.blocklen = block_len;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
+ g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;
+
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_io_ctx->cryop_cnt_remaining == (int)num_blocks);
+ CU_ASSERT(g_io_ctx->crypto_op == RTE_CRYPTO_CIPHER_OP_DECRYPT);
+
+ for (i = 0; i < num_blocks; i++) {
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_large_rw + (i * block_len));
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->userdata == g_bdev_io);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
+ }
+
+ /* Multi block size write, multi-element */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->u.bdev.iovcnt = 1;
+ g_bdev_io->u.bdev.num_blocks = num_blocks;
+ g_bdev_io->u.bdev.iovs[0].iov_len = io_len;
+ g_bdev_io->u.bdev.iovs[0].iov_base = &test_large_rw;
+ g_crypto_bdev.crypto_bdev.blocklen = block_len;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
+ g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;
+
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_io_ctx->cryop_cnt_remaining == (int)num_blocks);
+ CU_ASSERT(g_io_ctx->crypto_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT);
+
+ for (i = 0; i < num_blocks; i++) {
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_large_rw + (i * block_len));
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->userdata == g_bdev_io);
+ CU_ASSERT(g_io_ctx->cry_iov.iov_len == io_len);
+ CU_ASSERT(g_io_ctx->cry_iov.iov_base != NULL);
+ CU_ASSERT(g_io_ctx->cry_offset_blocks == 0);
+ CU_ASSERT(g_io_ctx->cry_num_blocks == num_blocks);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->buf_addr != NULL);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->data_len == block_len);
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_dst);
+ }
+ spdk_dma_free(g_io_ctx->cry_iov.iov_base);
+}
+
+static void
+test_dev_full(void)
+{
+ unsigned block_len = 512;
+ unsigned num_blocks = 2;
+ unsigned io_len = block_len * num_blocks;
+ unsigned i;
+
+ g_test_overflow = 1;
+
+ /* Multi block size read, multi-element */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->u.bdev.iovcnt = 1;
+ g_bdev_io->u.bdev.num_blocks = num_blocks;
+ g_bdev_io->u.bdev.iovs[0].iov_len = io_len;
+ g_bdev_io->u.bdev.iovs[0].iov_base = &test_dev_full;
+ g_crypto_bdev.crypto_bdev.blocklen = block_len;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
+ g_enqueue_mock = g_dequeue_mock = 1;
+ ut_rte_crypto_op_bulk_alloc = num_blocks;
+
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ /* this test only completes one of the 2 IOs (in the drain path) */
+ CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 1);
+ CU_ASSERT(g_io_ctx->crypto_op == RTE_CRYPTO_CIPHER_OP_DECRYPT);
+
+ for (i = 0; i < num_blocks; i++) {
+ /* One of the src_mbufs was freed because of the device full condition so
+ * we can't assert its value here.
+ */
+ CU_ASSERT(g_test_dev_full_ops[i]->sym->cipher.data.length == block_len);
+ CU_ASSERT(g_test_dev_full_ops[i]->sym->cipher.data.offset == 0);
+ CU_ASSERT(g_test_dev_full_ops[i]->sym->m_src == g_test_dev_full_ops[i]->sym->m_src);
+ CU_ASSERT(g_test_dev_full_ops[i]->sym->m_dst == NULL);
+ }
+
+	/* Only one of the 2 blocks in the test was freed on completion by design, so
+	 * we need to free the other one here.
+	 */
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_src);
+ g_test_overflow = 0;
+}
+
+static void
+test_crazy_rw(void)
+{
+ unsigned block_len = 512;
+ int num_blocks = 4;
+ int i;
+
+ /* Multi block size read, single element, strange IOV makeup */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->u.bdev.iovcnt = 3;
+ g_bdev_io->u.bdev.num_blocks = num_blocks;
+ g_bdev_io->u.bdev.iovs[0].iov_len = 512;
+ g_bdev_io->u.bdev.iovs[0].iov_base = &test_crazy_rw;
+ g_bdev_io->u.bdev.iovs[1].iov_len = 1024;
+ g_bdev_io->u.bdev.iovs[1].iov_base = &test_crazy_rw + 512;
+ g_bdev_io->u.bdev.iovs[2].iov_len = 512;
+ g_bdev_io->u.bdev.iovs[2].iov_base = &test_crazy_rw + 512 + 1024;
+
+ g_crypto_bdev.crypto_bdev.blocklen = block_len;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
+ g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;
+
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_io_ctx->cryop_cnt_remaining == num_blocks);
+ CU_ASSERT(g_io_ctx->crypto_op == RTE_CRYPTO_CIPHER_OP_DECRYPT);
+
+ for (i = 0; i < num_blocks; i++) {
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_crazy_rw + (i * block_len));
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->userdata == g_bdev_io);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src == g_test_crypto_ops[i]->sym->m_src);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
+ }
+
+ /* Multi block size write, single element strange IOV makeup */
+ num_blocks = 8;
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->u.bdev.iovcnt = 4;
+ g_bdev_io->u.bdev.num_blocks = num_blocks;
+ g_bdev_io->u.bdev.iovs[0].iov_len = 2048;
+ g_bdev_io->u.bdev.iovs[0].iov_base = &test_crazy_rw;
+ g_bdev_io->u.bdev.iovs[1].iov_len = 512;
+ g_bdev_io->u.bdev.iovs[1].iov_base = &test_crazy_rw + 2048;
+ g_bdev_io->u.bdev.iovs[2].iov_len = 512;
+ g_bdev_io->u.bdev.iovs[2].iov_base = &test_crazy_rw + 2048 + 512;
+ g_bdev_io->u.bdev.iovs[3].iov_len = 1024;
+ g_bdev_io->u.bdev.iovs[3].iov_base = &test_crazy_rw + 2048 + 512 + 512;
+
+ g_crypto_bdev.crypto_bdev.blocklen = block_len;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
+ g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;
+
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_io_ctx->cryop_cnt_remaining == num_blocks);
+ CU_ASSERT(g_io_ctx->crypto_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT);
+
+ for (i = 0; i < num_blocks; i++) {
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_crazy_rw + (i * block_len));
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->userdata == g_bdev_io);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src == g_test_crypto_ops[i]->sym->m_src);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == g_test_crypto_ops[i]->sym->m_dst);
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_dst);
+ }
+ spdk_dma_free(g_io_ctx->cry_iov.iov_base);
+}
+
+static void
+test_passthru(void)
+{
+	/* Make sure these pass through to our completion callback; test both success and failure. */
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_UNMAP;
+ MOCK_SET(spdk_bdev_unmap_blocks, 0);
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ MOCK_SET(spdk_bdev_unmap_blocks, -1);
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ MOCK_CLEAR(spdk_bdev_unmap_blocks);
+
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_FLUSH;
+ MOCK_SET(spdk_bdev_flush_blocks, 0);
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ MOCK_SET(spdk_bdev_flush_blocks, -1);
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ MOCK_CLEAR(spdk_bdev_flush_blocks);
+
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_RESET;
+ MOCK_SET(spdk_bdev_reset, 0);
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ MOCK_SET(spdk_bdev_reset, -1);
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ MOCK_CLEAR(spdk_bdev_reset);
+
+	/* We should never get a write-zeroes command since we report that we don't support it. */
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE_ZEROES;
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+}
+
+static void
+test_initdrivers(void)
+{
+ int rc;
+ static struct spdk_mempool *orig_mbuf_mp;
+ static struct spdk_mempool *orig_session_mp;
+
+ /* No drivers available, not an error though */
+ MOCK_SET(rte_eal_get_configuration, g_test_config);
+ MOCK_SET(rte_cryptodev_count, 0);
+ rc = vbdev_crypto_init_crypto_drivers();
+ CU_ASSERT(rc == 0);
+
+ /* Test failure of DPDK dev init. */
+ MOCK_SET(rte_cryptodev_count, 2);
+ MOCK_SET(rte_vdev_init, -1);
+ rc = vbdev_crypto_init_crypto_drivers();
+ CU_ASSERT(rc == -EINVAL);
+ MOCK_SET(rte_vdev_init, 0);
+
+ /* Can't create session pool. */
+ MOCK_SET(spdk_mempool_create, NULL);
+ orig_mbuf_mp = g_mbuf_mp;
+ orig_session_mp = g_session_mp;
+ rc = vbdev_crypto_init_crypto_drivers();
+ g_mbuf_mp = orig_mbuf_mp;
+ g_session_mp = orig_session_mp;
+ CU_ASSERT(rc == -ENOMEM);
+ MOCK_CLEAR(spdk_mempool_create);
+
+	/* Can't create op pool. These tests alloc and free our g_mbuf_mp,
+	 * so save it off here and restore it after each test is over.
+	 */
+ orig_mbuf_mp = g_mbuf_mp;
+ orig_session_mp = g_session_mp;
+ MOCK_SET(rte_crypto_op_pool_create, NULL);
+ rc = vbdev_crypto_init_crypto_drivers();
+ g_mbuf_mp = orig_mbuf_mp;
+ g_session_mp = orig_session_mp;
+ CU_ASSERT(rc == -ENOMEM);
+ MOCK_SET(rte_crypto_op_pool_create, (struct rte_mempool *)1);
+
+	/* Test failure of the "are resources sufficient" check. */
+ orig_mbuf_mp = g_mbuf_mp;
+ orig_session_mp = g_session_mp;
+ rc = vbdev_crypto_init_crypto_drivers();
+ g_mbuf_mp = orig_mbuf_mp;
+ g_session_mp = orig_session_mp;
+ CU_ASSERT(rc == -EINVAL);
+
+ /* Test crypto dev configure failure. */
+ MOCK_SET(rte_cryptodev_device_count_by_driver, 2);
+ MOCK_SET(rte_cryptodev_info_get, 1);
+ MOCK_SET(rte_cryptodev_configure, -1);
+ orig_mbuf_mp = g_mbuf_mp;
+ orig_session_mp = g_session_mp;
+ rc = vbdev_crypto_init_crypto_drivers();
+ g_mbuf_mp = orig_mbuf_mp;
+ g_session_mp = orig_session_mp;
+ MOCK_SET(rte_cryptodev_configure, 0);
+ CU_ASSERT(rc == -EINVAL);
+
+ /* Test failure of qp setup. */
+ MOCK_SET(rte_cryptodev_queue_pair_setup, -1);
+ orig_mbuf_mp = g_mbuf_mp;
+ orig_session_mp = g_session_mp;
+ rc = vbdev_crypto_init_crypto_drivers();
+ g_mbuf_mp = orig_mbuf_mp;
+ g_session_mp = orig_session_mp;
+ CU_ASSERT(rc == -EINVAL);
+ MOCK_SET(rte_cryptodev_queue_pair_setup, 0);
+
+ /* Test failure of dev start. */
+ MOCK_SET(rte_cryptodev_start, -1);
+ orig_mbuf_mp = g_mbuf_mp;
+ orig_session_mp = g_session_mp;
+ rc = vbdev_crypto_init_crypto_drivers();
+ g_mbuf_mp = orig_mbuf_mp;
+ g_session_mp = orig_session_mp;
+ CU_ASSERT(rc == -EINVAL);
+ MOCK_SET(rte_cryptodev_start, 0);
+
+ /* Test happy path. */
+ rc = vbdev_crypto_init_crypto_drivers();
+ CU_ASSERT(rc == 0);
+}
+
+static void
+test_crypto_op_complete(void)
+{
+ /* Make sure completion code respects failure. */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
+ g_completion_called = false;
+ _crypto_operation_complete(g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ CU_ASSERT(g_completion_called == true);
+
+ /* Test read completion. */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
+ g_completion_called = false;
+ _crypto_operation_complete(g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_completion_called == true);
+
+ /* Test write completion success. */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
+ g_completion_called = false;
+ MOCK_SET(spdk_bdev_writev_blocks, 0);
+	/* The code under test will free this; if it doesn't, ASAN will complain. */
+ g_io_ctx->cry_iov.iov_base = spdk_dma_malloc(16, 0x10, NULL);
+ _crypto_operation_complete(g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_completion_called == true);
+
+ /* Test write completion failed. */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
+ g_completion_called = false;
+ MOCK_SET(spdk_bdev_writev_blocks, -1);
+	/* The code under test will free this; if it doesn't, ASAN will complain. */
+ g_io_ctx->cry_iov.iov_base = spdk_dma_malloc(16, 0x10, NULL);
+ _crypto_operation_complete(g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ CU_ASSERT(g_completion_called == true);
+
+ /* Test bogus type for this completion. */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_RESET;
+ g_completion_called = false;
+ _crypto_operation_complete(g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ CU_ASSERT(g_completion_called == true);
+}
+
+static void
+test_supported_io(void)
+{
+ void *ctx = NULL;
+ bool rc = true;
+
+	/* Make sure we always report false for write-zeroes; we need the bdev layer
+	 * to send real zeroes so we can encrypt/decrypt them.
+	 */
+ rc = vbdev_crypto_io_type_supported(ctx, SPDK_BDEV_IO_TYPE_WRITE_ZEROES);
+ CU_ASSERT(rc == false);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("crypto", test_setup, test_cleanup);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (CU_add_test(suite, "test_error_paths",
+ test_error_paths) == NULL ||
+ CU_add_test(suite, "test_simple_write",
+ test_simple_write) == NULL ||
+ CU_add_test(suite, "test_simple_read",
+ test_simple_read) == NULL ||
+ CU_add_test(suite, "test_large_rw",
+ test_large_rw) == NULL ||
+ CU_add_test(suite, "test_dev_full",
+ test_dev_full) == NULL ||
+ CU_add_test(suite, "test_crazy_rw",
+ test_crazy_rw) == NULL ||
+ CU_add_test(suite, "test_passthru",
+ test_passthru) == NULL ||
+ CU_add_test(suite, "test_initdrivers",
+ test_initdrivers) == NULL ||
+ CU_add_test(suite, "test_crypto_op_complete",
+ test_crypto_op_complete) == NULL ||
+ CU_add_test(suite, "test_supported_io",
+ test_supported_io) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/bdev/crypto.c/rte_crypto.h b/src/spdk/test/unit/lib/bdev/crypto.c/rte_crypto.h
new file mode 100644
index 00000000..a53a71df
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/crypto.c/rte_crypto.h
@@ -0,0 +1,95 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * Copyright(c) 2016 6WIND S.A.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_CRYPTO_H_
+#define _RTE_CRYPTO_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* In order to mock some DPDK functions, we place headers here with the same name
+ * as the DPDK headers so that these definitions will be picked up. Only what's
+ * mocked is included.
+ */
+
+#include "rte_mbuf.h"
+#include "rte_mempool.h"
+#include "rte_crypto_sym.h"
+
+enum rte_crypto_op_type {
+ RTE_CRYPTO_OP_TYPE_UNDEFINED,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+};
+
+enum rte_crypto_op_status {
+ RTE_CRYPTO_OP_STATUS_SUCCESS,
+ RTE_CRYPTO_OP_STATUS_NOT_PROCESSED,
+ RTE_CRYPTO_OP_STATUS_AUTH_FAILED,
+ RTE_CRYPTO_OP_STATUS_INVALID_SESSION,
+ RTE_CRYPTO_OP_STATUS_INVALID_ARGS,
+ RTE_CRYPTO_OP_STATUS_ERROR,
+};
+
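+/* Minimal rte_crypto_op: sym[0] is a flexible array member, so a symmetric op is
+ * laid out inline immediately after this header (the tests calloc both together).
+ */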
+struct rte_crypto_op {
+ uint8_t type;
+ uint8_t status;
+ uint8_t sess_type;
+ uint8_t reserved[5];
+ struct rte_mempool *mempool;
+ rte_iova_t phys_addr;
+ __extension__
+ union {
+ struct rte_crypto_sym_op sym[0];
+ };
+};
+
+extern struct rte_mempool *
+rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
+ unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
+ int socket_id);
+
+static inline unsigned
+rte_crypto_op_bulk_alloc(struct rte_mempool *mempool,
+ enum rte_crypto_op_type type,
+ struct rte_crypto_op **ops, uint16_t nb_ops);
+
+static inline int
+rte_crypto_op_attach_sym_session(struct rte_crypto_op *op,
+ struct rte_cryptodev_sym_session *sess);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/spdk/test/unit/lib/bdev/crypto.c/rte_cryptodev.h b/src/spdk/test/unit/lib/bdev/crypto.c/rte_cryptodev.h
new file mode 100644
index 00000000..b941a20d
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/crypto.c/rte_cryptodev.h
@@ -0,0 +1,153 @@
+/*-
+ *
+ * Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
+ * Copyright 2014 6WIND S.A.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_CRYPTODEV_H_
+#define _RTE_CRYPTODEV_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* In order to mock some DPDK functions, we place headers here with the same name
+ * as the DPDK headers so that these definitions will be picked up. Only what's
+ * mocked is included.
+ */
+
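+/* Stub rte_crypto_op_ctod_offset() to return a static dummy buffer so that IV
+ * writes performed by the code under test land somewhere harmless.
+ */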
+uint8_t dummy[16];
+#define rte_crypto_op_ctod_offset(c, t, o) &dummy[0]
+
+#define RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER (1ULL << 9)
+
+struct rte_cryptodev_info {
+ const char *driver_name;
+ uint8_t driver_id;
+ struct rte_pci_device *pci_dev;
+ uint64_t feature_flags;
+ const struct rte_cryptodev_capabilities *capabilities;
+ unsigned max_nb_queue_pairs;
+ struct {
+ unsigned max_nb_sessions;
+ unsigned int max_nb_sessions_per_qp;
+ } sym;
+};
+
+enum rte_cryptodev_event_type {
+ RTE_CRYPTODEV_EVENT_UNKNOWN,
+ RTE_CRYPTODEV_EVENT_ERROR,
+ RTE_CRYPTODEV_EVENT_MAX
+};
+
+struct rte_cryptodev_qp_conf {
+ uint32_t nb_descriptors;
+};
+
+struct rte_cryptodev_stats {
+ uint64_t enqueued_count;
+ uint64_t dequeued_count;
+ uint64_t enqueue_err_count;
+ uint64_t dequeue_err_count;
+};
+
+#define RTE_CRYPTODEV_NAME_MAX_LEN (64)
+
+extern uint8_t
+rte_cryptodev_count(void);
+
+extern uint8_t
+rte_cryptodev_device_count_by_driver(uint8_t driver_id);
+
+extern int
+rte_cryptodev_socket_id(uint8_t dev_id);
+
+struct rte_cryptodev_config {
+ int socket_id;
+ uint16_t nb_queue_pairs;
+};
+
+extern int
+rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config);
+
+extern int
+rte_cryptodev_start(uint8_t dev_id);
+
+extern void
+rte_cryptodev_stop(uint8_t dev_id);
+
+extern int
+rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
+ const struct rte_cryptodev_qp_conf *qp_conf, int socket_id,
+ struct rte_mempool *session_pool);
+
+extern void
+rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info);
+
+static inline uint16_t
+rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
+ struct rte_crypto_op **ops, uint16_t nb_ops);
+
+static inline uint16_t
+rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
+ struct rte_crypto_op **ops, uint16_t nb_ops);
+
+struct rte_cryptodev_sym_session {
+ __extension__ void *sess_private_data[0];
+};
+
+struct rte_cryptodev_asym_session {
+ __extension__ void *sess_private_data[0];
+};
+
+struct rte_crypto_asym_xform;
+
+struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_create(struct rte_mempool *mempool);
+
+int
+rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess);
+
+int
+rte_cryptodev_sym_session_init(uint8_t dev_id,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_crypto_sym_xform *xforms,
+ struct rte_mempool *mempool);
+
+int
+rte_cryptodev_sym_session_clear(uint8_t dev_id,
+ struct rte_cryptodev_sym_session *sess);
+
+unsigned int
+rte_cryptodev_sym_get_private_session_size(uint8_t dev_id);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/spdk/test/unit/lib/bdev/crypto.c/rte_mbuf.h b/src/spdk/test/unit/lib/bdev/crypto.c/rte_mbuf.h
new file mode 100644
index 00000000..4d69f482
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/crypto.c/rte_mbuf.h
@@ -0,0 +1,148 @@
+/*-
+ *
+ * Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
+ * Copyright 2014 6WIND S.A.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_MBUF_H_
+#define _RTE_MBUF_H_
+
+#include "rte_mempool.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* In order to mock some DPDK functions, we place headers here with the same name
+ * as the DPDK headers so that these definitions will be picked up. Only what's
+ * mocked is included.
+ */
+
+__extension__
+typedef void *MARKER[0];
+__extension__
+typedef uint8_t MARKER8[0];
+__extension__
+typedef uint64_t MARKER64[0];
+
+struct rte_mbuf {
+ MARKER cacheline0;
+ void *buf_addr;
+ RTE_STD_C11
+ union {
+ rte_iova_t buf_iova;
+ rte_iova_t buf_physaddr;
+ } __rte_aligned(sizeof(rte_iova_t));
+ MARKER64 rearm_data;
+ uint16_t data_off;
+ RTE_STD_C11
+ union {
+ rte_atomic16_t refcnt_atomic;
+ uint16_t refcnt;
+ };
+ uint16_t nb_segs;
+ uint16_t port;
+ uint64_t ol_flags;
+ MARKER rx_descriptor_fields1;
+ RTE_STD_C11
+ union {
+ uint32_t packet_type;
+ struct {
+ uint32_t l2_type: 4;
+ uint32_t l3_type: 4;
+ uint32_t l4_type: 4;
+ uint32_t tun_type: 4;
+ RTE_STD_C11
+ union {
+ uint8_t inner_esp_next_proto;
+ __extension__
+ struct {
+ uint8_t inner_l2_type: 4;
+ uint8_t inner_l3_type: 4;
+ };
+ };
+ uint32_t inner_l4_type: 4;
+ };
+ };
+ uint32_t pkt_len;
+ uint16_t data_len;
+ uint16_t vlan_tci;
+ union {
+ uint32_t rss;
+ struct {
+ RTE_STD_C11
+ union {
+ struct {
+ uint16_t hash;
+ uint16_t id;
+ };
+ uint32_t lo;
+ };
+ uint32_t hi;
+ } fdir;
+ struct {
+ uint32_t lo;
+ uint32_t hi;
+ } sched;
+ uint32_t usr;
+ } hash;
+ uint16_t vlan_tci_outer;
+ uint16_t buf_len;
+ uint64_t timestamp;
+ MARKER cacheline1 __rte_cache_min_aligned;
+ RTE_STD_C11
+ union {
+ void *userdata;
+ uint64_t udata64;
+ };
+ struct rte_mempool *pool;
+ struct rte_mbuf *next;
+ RTE_STD_C11
+ union {
+ uint64_t tx_offload;
+ __extension__
+ struct {
+ uint64_t l2_len: 7;
+ uint64_t l3_len: 9;
+ uint64_t l4_len: 8;
+ uint64_t tso_segsz: 16;
+ uint64_t outer_l3_len: 9;
+ uint64_t outer_l2_len: 7;
+ };
+ };
+ uint16_t priv_size;
+ uint16_t timesync;
+ uint32_t seqn;
+
+} __rte_cache_aligned;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/spdk/test/unit/lib/bdev/crypto.c/rte_mempool.h b/src/spdk/test/unit/lib/bdev/crypto.c/rte_mempool.h
new file mode 100644
index 00000000..5750d30f
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/crypto.c/rte_mempool.h
@@ -0,0 +1,145 @@
+/*-
+ *
+ * Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
+ * Copyright 2014 6WIND S.A.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_MEMPOOL_H_
+#define _RTE_MEMPOOL_H_
+
+/**
+ * @file
+ * RTE Mempool.
+ *
+ * A memory pool is an allocator of fixed-size objects. It is
+ * identified by its name, and uses a ring to store free objects. It
+ * provides some other optional services, like a per-core object
+ * cache, and an alignment helper to ensure that objects are padded
+ * to spread them equally on all RAM channels, ranks, and so on.
+ *
+ * Objects owned by a mempool should never be added in another
+ * mempool. When an object is freed using rte_mempool_put() or
+ * equivalent, the object data is not modified; the user can save some
+ * meta-data in the object data and retrieve them when allocating a
+ * new object.
+ *
+ * Note: the mempool implementation is not preemptible. An lcore must not be
+ * interrupted by another task that uses the same mempool (because it uses a
+ * ring which is not preemptible). Also, usual mempool functions like
+ * rte_mempool_get() or rte_mempool_put() are designed to be called from an EAL
+ * thread due to the internal per-lcore cache. Due to the lack of caching,
+ * rte_mempool_get() or rte_mempool_put() performance will suffer when called
+ * by non-EAL threads. Instead, non-EAL threads should call
+ * rte_mempool_generic_get() or rte_mempool_generic_put() with a user cache
+ * created with rte_mempool_cache_create().
+ */
+
+#include <rte_config.h>
+#include <rte_spinlock.h>
+#include <rte_debug.h>
+#include <rte_ring.h>
+#include <rte_memcpy.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* In order to mock some DPDK functions, we place headers here with the same name
+ * as the DPDK headers so that these definitions will be picked up. Only what's
+ * mocked is included.
+ */
+
+STAILQ_HEAD(rte_mempool_objhdr_list, rte_mempool_objhdr);
+STAILQ_HEAD(rte_mempool_memhdr_list, rte_mempool_memhdr);
+struct rte_mempool {
+ char name[RTE_MEMZONE_NAMESIZE];
+ RTE_STD_C11
+ union {
+ void *pool_data;
+ uint64_t pool_id;
+ };
+ void *pool_config;
+ const struct rte_memzone *mz;
+ unsigned int flags;
+ int socket_id;
+ uint32_t size;
+ uint32_t cache_size;
+ uint32_t elt_size;
+ uint32_t header_size;
+ uint32_t trailer_size;
+ unsigned private_data_size;
+ int32_t ops_index;
+ struct rte_mempool_cache *local_cache;
+ uint32_t populated_size;
+ struct rte_mempool_objhdr_list elt_list;
+ uint32_t nb_mem_chunks;
+ struct rte_mempool_memhdr_list mem_list;
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ struct rte_mempool_debug_stats stats[RTE_MAX_LCORE];
+#endif
+} __rte_cache_aligned;
+#define RTE_MEMPOOL_OPS_NAMESIZE 32
+typedef int (*rte_mempool_alloc_t)(struct rte_mempool *mp);
+typedef void (*rte_mempool_free_t)(struct rte_mempool *mp);
+typedef int (*rte_mempool_enqueue_t)(struct rte_mempool *mp,
+ void *const *obj_table, unsigned int n);
+typedef int (*rte_mempool_dequeue_t)(struct rte_mempool *mp,
+ void **obj_table, unsigned int n);
+typedef unsigned(*rte_mempool_get_count)(const struct rte_mempool *mp);
+typedef int (*rte_mempool_get_capabilities_t)(const struct rte_mempool *mp,
+ unsigned int *flags);
+typedef int (*rte_mempool_ops_register_memory_area_t)
+(const struct rte_mempool *mp, char *vaddr, rte_iova_t iova, size_t len);
+struct rte_mempool_ops {
+ char name[RTE_MEMPOOL_OPS_NAMESIZE];
+ rte_mempool_alloc_t alloc;
+ rte_mempool_free_t free;
+ rte_mempool_enqueue_t enqueue;
+ rte_mempool_dequeue_t dequeue;
+ rte_mempool_get_count get_count;
+ rte_mempool_get_capabilities_t get_capabilities;
+ rte_mempool_ops_register_memory_area_t register_memory_area;
+} __rte_cache_aligned;
+#define RTE_MEMPOOL_MAX_OPS_IDX 16
+struct rte_mempool_ops_table {
+ rte_spinlock_t sl;
+ uint32_t num_ops;
+ struct rte_mempool_ops ops[RTE_MEMPOOL_MAX_OPS_IDX];
+} __rte_cache_aligned;
+extern struct rte_mempool_ops_table rte_mempool_ops_table;
+void
+rte_mempool_free(struct rte_mempool *mp);
+static __rte_always_inline void
+rte_mempool_put_bulk(struct rte_mempool *mp, void *const *obj_table,
+ unsigned int n);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_MEMPOOL_H_ */
diff --git a/src/spdk/test/unit/lib/bdev/gpt/Makefile b/src/spdk/test/unit/lib/bdev/gpt/Makefile
new file mode 100644
index 00000000..2fad9ba0
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/gpt/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = gpt.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/bdev/gpt/gpt.c/.gitignore b/src/spdk/test/unit/lib/bdev/gpt/gpt.c/.gitignore
new file mode 100644
index 00000000..74d476f5
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/gpt/gpt.c/.gitignore
@@ -0,0 +1 @@
+gpt_ut
diff --git a/src/spdk/test/unit/lib/bdev/gpt/gpt.c/Makefile b/src/spdk/test/unit/lib/bdev/gpt/gpt.c/Makefile
new file mode 100644
index 00000000..ad21ea2a
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/gpt/gpt.c/Makefile
@@ -0,0 +1,40 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of the copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+
+TEST_FILE = gpt_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/bdev/gpt/gpt.c/gpt_ut.c b/src/spdk/test/unit/lib/bdev/gpt/gpt.c/gpt_ut.c
new file mode 100644
index 00000000..3182f9c4
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/gpt/gpt.c/gpt_ut.c
@@ -0,0 +1,297 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "common/lib/test_env.c"
+
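+/*
+ * Compile the GPT implementation directly into the test so its internal
+ * helpers (spdk_gpt_check_mbr, spdk_gpt_read_header, spdk_gpt_read_partitions)
+ * can be called without going through a public header.
+ */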
+#include "bdev/gpt/gpt.c"
+
+static void
+test_check_mbr(void)
+{
+ struct spdk_gpt *gpt;
+ struct spdk_mbr *mbr;
+ unsigned char a[SPDK_GPT_BUFFER_SIZE];
+ int re;
+
+ /* A NULL gpt is not tested here; spdk_gpt_parse() filters it out before spdk_gpt_check_mbr() is called */
+ gpt = calloc(1, sizeof(*gpt));
+ SPDK_CU_ASSERT_FATAL(gpt != NULL);
+
+ /* Fill the buffer with 'a' so every field mismatches, including mbr_signature */
+ memset(a, 'a', sizeof(a));
+ gpt->buf = &a[0];
+ re = spdk_gpt_check_mbr(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Make mbr->mbr_signature match; start_lba still mismatches */
+ mbr = (struct spdk_mbr *)gpt->buf;
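+ /* 0xAA55 is the on-disk boot signature bytes 0x55 0xAA read as a little-endian uint16 */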
+ mbr->mbr_signature = 0xAA55;
+ re = spdk_gpt_check_mbr(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Make mbr->partitions[0].start_lba match; os_type still mismatches */
+ mbr->partitions[0].start_lba = 1;
+ re = spdk_gpt_check_mbr(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Make mbr->partitions[0].os_type match (0xEE, the protective-MBR type that signals GPT); size_lba still mismatches */
+ mbr->partitions[0].os_type = 0xEE;
+ re = spdk_gpt_check_mbr(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Make mbr->partitions[0].size_lba match; all MBR checks now pass */
+ mbr->partitions[0].size_lba = 0xFFFFFFFF;
+ re = spdk_gpt_check_mbr(gpt);
+ CU_ASSERT(re == 0);
+
+ free(gpt);
+}
+
+static void
+test_read_header(void)
+{
+ struct spdk_gpt *gpt;
+ struct spdk_gpt_header *head;
+ unsigned char a[SPDK_GPT_BUFFER_SIZE];
+ int re;
+
+ /* A NULL gpt is not tested here; spdk_gpt_parse() filters it out before spdk_gpt_read_header() is called */
+ gpt = calloc(1, sizeof(*gpt));
+ SPDK_CU_ASSERT_FATAL(gpt != NULL);
+
+ /* Fill the buffer with 'a' */
+ memset(a, 'a', sizeof(a));
+ gpt->buf = &a[0];
+
+ /* Set head->header_size to a value that mismatches sizeof(*head) */
+ gpt->sector_size = 512;
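+ /* The primary GPT header lives at GPT_PRIMARY_PARTITION_TABLE_LBA, i.e. LBA 1 */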
+ head = (struct spdk_gpt_header *)(gpt->buf + GPT_PRIMARY_PARTITION_TABLE_LBA * gpt->sector_size);
+ to_le32(&head->header_size, 0x258);
+ re = spdk_gpt_read_header(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Make head->header_size match; header_crc32 still mismatches */
+ head->header_size = sizeof(*head);
+ to_le32(&head->header_crc32, 0x22D18C80);
+ re = spdk_gpt_read_header(gpt);
+ CU_ASSERT(re == -1);
+
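+ /*
+ * The header_crc32 constants used below are precomputed CRC32s of the header
+ * contents as they stand at each step: a matching value lets the CRC check
+ * pass so the next validation failure (or the passing case) is reached.
+ */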
+ /* Make head->header_crc32 match; gpt_signature still mismatches */
+ to_le32(&head->header_crc32, 0xC5B2117E);
+ re = spdk_gpt_read_header(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Make head->gpt_signature match; the lba_end/usable_lba checks still fail */
+ to_le32(&head->header_crc32, 0xD637335A);
+ memcpy(head->gpt_signature, "EFI PART", sizeof(head->gpt_signature));
+ re = spdk_gpt_read_header(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Make gpt->lba_end and the usable LBA range consistent; all header checks now pass */
+ to_le32(&head->header_crc32, 0x30CB7378);
+ to_le64(&gpt->lba_start, 0x0);
+ to_le64(&gpt->lba_end, 0x2E935FFE);
+ to_le64(&head->first_usable_lba, 0xA);
+ to_le64(&head->last_usable_lba, 0xF4240);
+ re = spdk_gpt_read_header(gpt);
+ CU_ASSERT(re == 0);
+
+ free(gpt);
+}
+
+static void
+test_read_partitions(void)
+{
+ struct spdk_gpt *gpt;
+ struct spdk_gpt_header *head;
+ unsigned char a[SPDK_GPT_BUFFER_SIZE];
+ int re;
+
+ /* A NULL gpt is not tested here; spdk_gpt_parse() filters it out before spdk_gpt_read_partitions() is called */
+ gpt = calloc(1, sizeof(*gpt));
+ SPDK_CU_ASSERT_FATAL(gpt != NULL);
+
+ /* Fill the buffer with 'a' */
+ memset(a, 'a', sizeof(a));
+ gpt->buf = &a[0];
+
+ /* Set num_partition_entries above the maximum number of entries GPT supports */
+ gpt->sector_size = 512;
+ head = (struct spdk_gpt_header *)(gpt->buf + GPT_PRIMARY_PARTITION_TABLE_LBA * gpt->sector_size);
+ gpt->header = head;
+ to_le32(&head->num_partition_entries, 0x100);
+ re = spdk_gpt_read_partitions(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Bring num_partition_entries within the maximum; size_of_partition_entry still mismatches */
+ to_le32(&head->header_crc32, 0x573857BE);
+ to_le32(&head->num_partition_entries, 0x40);
+ to_le32(&head->size_of_partition_entry, 0x0);
+ re = spdk_gpt_read_partitions(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Make size_of_partition_entry match; partition_entry_lba still mismatches */
+ to_le32(&head->header_crc32, 0x5279B712);
+ to_le32(&head->size_of_partition_entry, 0x80);
+ to_le64(&head->partition_entry_lba, 0x64);
+ re = spdk_gpt_read_partitions(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Make partition_entry_lba match; partition_entry_array_crc32 still mismatches */
+ to_le32(&head->header_crc32, 0xEC093B43);
+ to_le64(&head->partition_entry_lba, 0x20);
+ to_le32(&head->partition_entry_array_crc32, 0x0);
+ re = spdk_gpt_read_partitions(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Make partition_entry_array_crc32 match; all partition checks now pass */
+ to_le32(&head->header_crc32, 0xE1A08822);
+ to_le32(&head->partition_entry_array_crc32, 0xEBEE44FB);
+ to_le32(&head->num_partition_entries, 0x80);
+ re = spdk_gpt_read_partitions(gpt);
+ CU_ASSERT(re == 0);
+
+ free(gpt);
+}
+
+static void
+test_parse(void)
+{
+ struct spdk_gpt *gpt;
+ struct spdk_mbr *mbr;
+ struct spdk_gpt_header *head;
+ unsigned char a[SPDK_GPT_BUFFER_SIZE];
+ int re;
+
+ /* Pass a NULL gpt */
+ re = spdk_gpt_parse(NULL);
+ CU_ASSERT(re == -1);
+
+ /* Allocate gpt but leave gpt->buf NULL */
+ gpt = calloc(1, sizeof(*gpt));
+ SPDK_CU_ASSERT_FATAL(gpt != NULL);
+ re = spdk_gpt_parse(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Fill the buffer with 'a' so check_mbr fails */
+ memset(a, 'a', sizeof(a));
+ gpt->buf = &a[0];
+ re = spdk_gpt_parse(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Make check_mbr pass; read_header still fails */
+ mbr = (struct spdk_mbr *)gpt->buf;
+ mbr->mbr_signature = 0xAA55;
+ mbr->partitions[0].start_lba = 1;
+ mbr->partitions[0].os_type = 0xEE;
+ mbr->partitions[0].size_lba = 0xFFFFFFFF;
+ re = spdk_gpt_parse(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Make read_header pass; read_partitions still fails */
+ gpt->sector_size = 512;
+ head = (struct spdk_gpt_header *)(gpt->buf + GPT_PRIMARY_PARTITION_TABLE_LBA * gpt->sector_size);
+ head->header_size = sizeof(*head);
+ memcpy(head->gpt_signature, "EFI PART", sizeof(head->gpt_signature));
+ to_le32(&head->header_crc32, 0x30CB7378);
+ to_le64(&gpt->lba_start, 0x0);
+ to_le64(&gpt->lba_end, 0x2E935FFE);
+ to_le64(&head->first_usable_lba, 0xA);
+ to_le64(&head->last_usable_lba, 0xF4240);
+ re = spdk_gpt_parse(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Make read_partitions pass; the whole parse now succeeds */
+ to_le32(&head->size_of_partition_entry, 0x80);
+ to_le64(&head->partition_entry_lba, 0x20);
+ to_le32(&head->header_crc32, 0xE1A08822);
+ to_le32(&head->partition_entry_array_crc32, 0xEBEE44FB);
+ to_le32(&head->num_partition_entries, 0x80);
+ re = spdk_gpt_parse(gpt);
+ CU_ASSERT(re == 0);
+
+ free(gpt);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("gpt_parse", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "parse", test_parse) == NULL ||
+ CU_add_test(suite, "check mbr", test_check_mbr) == NULL ||
+ CU_add_test(suite, "read header", test_read_header) == NULL ||
+ CU_add_test(suite, "read partitions", test_read_partitions) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/bdev/mt/Makefile b/src/spdk/test/unit/lib/bdev/mt/Makefile
new file mode 100644
index 00000000..a19b345a
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/mt/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = bdev.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/bdev/mt/bdev.c/.gitignore b/src/spdk/test/unit/lib/bdev/mt/bdev.c/.gitignore
new file mode 100644
index 00000000..a5a22d0d
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/mt/bdev.c/.gitignore
@@ -0,0 +1 @@
+bdev_ut
diff --git a/src/spdk/test/unit/lib/bdev/mt/bdev.c/Makefile b/src/spdk/test/unit/lib/bdev/mt/bdev.c/Makefile
new file mode 100644
index 00000000..96b48574
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/mt/bdev.c/Makefile
@@ -0,0 +1,41 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.mock.unittest.mk
+
+TEST_FILE = bdev_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/bdev/mt/bdev.c/bdev_ut.c b/src/spdk/test/unit/lib/bdev/mt/bdev.c/bdev_ut.c
new file mode 100644
index 00000000..09740fa9
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/mt/bdev.c/bdev_ut.c
@@ -0,0 +1,1360 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "common/lib/test_env.c"
+#include "common/lib/ut_multithread.c"
+#include "unit/lib/json_mock.c"
+
+#include "spdk/config.h"
+/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
+#undef SPDK_CONFIG_VTUNE
+
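+/*
+ * Compile the bdev layer in directly so the test can reach internal state
+ * (e.g. bdev->internal.qos) and private constants like NOMEM_THRESHOLD_COUNT.
+ */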
+#include "bdev/bdev.c"
+
+#define BDEV_UT_NUM_THREADS 3
+
+DEFINE_STUB_V(spdk_scsi_nvme_translate, (const struct spdk_bdev_io *bdev_io,
+ int *sc, int *sk, int *asc, int *ascq));
+
+DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *, (struct spdk_conf *cp,
+ const char *name), NULL);
+DEFINE_STUB(spdk_conf_section_get_nmval, char *,
+ (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL);
+DEFINE_STUB(spdk_conf_section_get_intval, int, (struct spdk_conf_section *sp, const char *key), -1);
+
+struct spdk_trace_histories *g_trace_histories;
+DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
+DEFINE_STUB_V(spdk_trace_register_owner, (uint8_t type, char id_prefix));
+DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
+DEFINE_STUB_V(spdk_trace_register_description, (const char *name, const char *short_name,
+ uint16_t tpoint_id, uint8_t owner_type,
+ uint8_t object_type, uint8_t new_object,
+ uint8_t arg1_is_ptr, const char *arg1_name));
+DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
+ uint32_t size, uint64_t object_id, uint64_t arg1));
+
+struct ut_bdev {
+ struct spdk_bdev bdev;
+ void *io_target;
+};
+
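+/*
+ * Per-channel state for the stub backing device. Submitted I/O sit on
+ * outstanding_io until stub_complete_io() finishes them; avail_cnt models a
+ * queue depth that, once exhausted, makes stub_submit_request() complete new
+ * I/O with SPDK_BDEV_IO_STATUS_NOMEM.
+ */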
+struct ut_bdev_channel {
+ TAILQ_HEAD(, spdk_bdev_io) outstanding_io;
+ uint32_t outstanding_cnt;
+ uint32_t avail_cnt;
+};
+
+int g_io_device;
+struct ut_bdev g_bdev;
+struct spdk_bdev_desc *g_desc;
+bool g_teardown_done = false;
+bool g_get_io_channel = true;
+bool g_create_ch = true;
+bool g_init_complete_called = false;
+bool g_fini_start_called = true;
+
+static int
+stub_create_ch(void *io_device, void *ctx_buf)
+{
+ struct ut_bdev_channel *ch = ctx_buf;
+
+ if (g_create_ch == false) {
+ return -1;
+ }
+
+ TAILQ_INIT(&ch->outstanding_io);
+ ch->outstanding_cnt = 0;
+ /*
+ * When avail_cnt reaches 0, stub_submit_request() will complete new I/O
+ * with SPDK_BDEV_IO_STATUS_NOMEM. Most tests do not want ENOMEM to occur,
+ * so by default set this to a big value that won't get hit. The ENOMEM
+ * tests can then override this value to something much smaller to induce
+ * ENOMEM conditions.
+ */
+ ch->avail_cnt = 2048;
+ return 0;
+}
+
+static void
+stub_destroy_ch(void *io_device, void *ctx_buf)
+{
+}
+
+static struct spdk_io_channel *
+stub_get_io_channel(void *ctx)
+{
+ struct ut_bdev *ut_bdev = ctx;
+
+ if (g_get_io_channel == true) {
+ return spdk_get_io_channel(ut_bdev->io_target);
+ } else {
+ return NULL;
+ }
+}
+
+static int
+stub_destruct(void *ctx)
+{
+ return 0;
+}
+
+static void
+stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
+{
+ struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
+
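+ /*
+ * A reset first fails every I/O outstanding on this channel, then falls
+ * through below to be queued (or rejected with NOMEM) like any other I/O.
+ */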
+ if (bdev_io->type == SPDK_BDEV_IO_TYPE_RESET) {
+ struct spdk_bdev_io *io;
+
+ while (!TAILQ_EMPTY(&ch->outstanding_io)) {
+ io = TAILQ_FIRST(&ch->outstanding_io);
+ TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
+ ch->outstanding_cnt--;
+ spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_FAILED);
+ ch->avail_cnt++;
+ }
+ }
+
+ if (ch->avail_cnt > 0) {
+ TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
+ ch->outstanding_cnt++;
+ ch->avail_cnt--;
+ } else {
+ spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
+ }
+}
+
+static uint32_t
+stub_complete_io(void *io_target, uint32_t num_to_complete)
+{
+ struct spdk_io_channel *_ch = spdk_get_io_channel(io_target);
+ struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
+ struct spdk_bdev_io *io;
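+ /* num_to_complete == 0 is a sentinel meaning "complete everything outstanding" */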
+ bool complete_all = (num_to_complete == 0);
+ uint32_t num_completed = 0;
+
+ while (complete_all || num_completed < num_to_complete) {
+ if (TAILQ_EMPTY(&ch->outstanding_io)) {
+ break;
+ }
+ io = TAILQ_FIRST(&ch->outstanding_io);
+ TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
+ ch->outstanding_cnt--;
+ spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_SUCCESS);
+ ch->avail_cnt++;
+ num_completed++;
+ }
+
+ spdk_put_io_channel(_ch);
+ return num_completed;
+}
+
+static struct spdk_bdev_fn_table fn_table = {
+ .get_io_channel = stub_get_io_channel,
+ .destruct = stub_destruct,
+ .submit_request = stub_submit_request,
+};
+
+static int
+module_init(void)
+{
+ return 0;
+}
+
+static void
+module_fini(void)
+{
+}
+
+static void
+init_complete(void)
+{
+ g_init_complete_called = true;
+}
+
+static void
+fini_start(void)
+{
+ g_fini_start_called = true;
+}
+
+struct spdk_bdev_module bdev_ut_if = {
+ .name = "bdev_ut",
+ .module_init = module_init,
+ .module_fini = module_fini,
+ .init_complete = init_complete,
+ .fini_start = fini_start,
+};
+
+SPDK_BDEV_MODULE_REGISTER(&bdev_ut_if)
+
+static void
+register_bdev(struct ut_bdev *ut_bdev, char *name, void *io_target)
+{
+ memset(ut_bdev, 0, sizeof(*ut_bdev));
+
+ ut_bdev->io_target = io_target;
+ ut_bdev->bdev.ctxt = ut_bdev;
+ ut_bdev->bdev.name = name;
+ ut_bdev->bdev.fn_table = &fn_table;
+ ut_bdev->bdev.module = &bdev_ut_if;
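+ /* 1024 blocks of 4096 bytes each: a 4 MiB test bdev */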
+ ut_bdev->bdev.blocklen = 4096;
+ ut_bdev->bdev.blockcnt = 1024;
+
+ spdk_bdev_register(&ut_bdev->bdev);
+}
+
+static void
+unregister_bdev(struct ut_bdev *ut_bdev)
+{
+ /* Handle any deferred messages. */
+ poll_threads();
+ spdk_bdev_unregister(&ut_bdev->bdev, NULL, NULL);
+}
+
+static void
+bdev_init_cb(void *done, int rc)
+{
+ CU_ASSERT(rc == 0);
+ *(bool *)done = true;
+}
+
+static void
+setup_test(void)
+{
+ bool done = false;
+
+ allocate_threads(BDEV_UT_NUM_THREADS);
+ set_thread(0);
+ spdk_bdev_initialize(bdev_init_cb, &done);
+ spdk_io_device_register(&g_io_device, stub_create_ch, stub_destroy_ch,
+ sizeof(struct ut_bdev_channel), NULL);
+ register_bdev(&g_bdev, "ut_bdev", &g_io_device);
+ spdk_bdev_open(&g_bdev.bdev, true, NULL, NULL, &g_desc);
+}
+
+static void
+finish_cb(void *cb_arg)
+{
+ g_teardown_done = true;
+}
+
+static void
+teardown_test(void)
+{
+ set_thread(0);
+ g_teardown_done = false;
+ spdk_bdev_close(g_desc);
+ g_desc = NULL;
+ unregister_bdev(&g_bdev);
+ spdk_io_device_unregister(&g_io_device, NULL);
+ spdk_bdev_finish(finish_cb, NULL);
+ poll_threads();
+ memset(&g_bdev, 0, sizeof(g_bdev));
+ CU_ASSERT(g_teardown_done == true);
+ g_teardown_done = false;
+ free_threads();
+}
+
+static uint32_t
+bdev_io_tailq_cnt(bdev_io_tailq_t *tailq)
+{
+ struct spdk_bdev_io *io;
+ uint32_t cnt = 0;
+
+ TAILQ_FOREACH(io, tailq, internal.link) {
+ cnt++;
+ }
+
+ return cnt;
+}
+
+static void
+basic(void)
+{
+ g_init_complete_called = false;
+ setup_test();
+ CU_ASSERT(g_init_complete_called == true);
+
+ set_thread(0);
+
+ g_get_io_channel = false;
+ g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
+ CU_ASSERT(g_ut_threads[0].ch == NULL);
+
+ g_get_io_channel = true;
+ g_create_ch = false;
+ g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
+ CU_ASSERT(g_ut_threads[0].ch == NULL);
+
+ g_get_io_channel = true;
+ g_create_ch = true;
+ g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
+ CU_ASSERT(g_ut_threads[0].ch != NULL);
+ spdk_put_io_channel(g_ut_threads[0].ch);
+
+ g_fini_start_called = false;
+ teardown_test();
+ CU_ASSERT(g_fini_start_called == true);
+}
+
+static void
+_bdev_removed(void *done)
+{
+ *(bool *)done = true;
+}
+
+static void
+_bdev_unregistered(void *done, int rc)
+{
+ CU_ASSERT(rc == 0);
+ *(bool *)done = true;
+}
+
+static void
+unregister_and_close(void)
+{
+ bool done, remove_notify;
+ struct spdk_bdev_desc *desc;
+
+ setup_test();
+ set_thread(0);
+
+ /* setup_test() automatically opens the bdev,
+ * but this test needs to do that in a different
+ * way. */
+ spdk_bdev_close(g_desc);
+ poll_threads();
+
+ remove_notify = false;
+ spdk_bdev_open(&g_bdev.bdev, true, _bdev_removed, &remove_notify, &desc);
+ CU_ASSERT(remove_notify == false);
+ CU_ASSERT(desc != NULL);
+
+ /* There is an open descriptor on the device. Unregister it,
+ * which can't proceed until the descriptor is closed. */
+ done = false;
+ spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done);
+ /* No polling has occurred, so neither of these should execute */
+ CU_ASSERT(remove_notify == false);
+ CU_ASSERT(done == false);
+
+ /* Prior to the unregister completing, close the descriptor */
+ spdk_bdev_close(desc);
+
+ /* Poll the threads to allow all events to be processed */
+ poll_threads();
+
+ /* Remove notify should not have been called because the
+ * descriptor is already closed. */
+ CU_ASSERT(remove_notify == false);
+
+ /* The unregister should have completed */
+ CU_ASSERT(done == true);
+
+ spdk_bdev_finish(finish_cb, NULL);
+ poll_threads();
+ free_threads();
+}
+
+static void
+reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
+{
+ bool *done = cb_arg;
+
+ CU_ASSERT(success == true);
+ *done = true;
+ spdk_bdev_free_io(bdev_io);
+}
+
+static void
+put_channel_during_reset(void)
+{
+ struct spdk_io_channel *io_ch;
+ bool done = false;
+
+ setup_test();
+
+ set_thread(0);
+ io_ch = spdk_bdev_get_io_channel(g_desc);
+ CU_ASSERT(io_ch != NULL);
+
+ /*
+ * Start a reset, but then put the I/O channel before
+ * the deferred messages for the reset get a chance to
+ * execute.
+ */
+ spdk_bdev_reset(g_desc, io_ch, reset_done, &done);
+ spdk_put_io_channel(io_ch);
+ poll_threads();
+ stub_complete_io(g_bdev.io_target, 0);
+
+ teardown_test();
+}
+
+static void
+aborted_reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
+{
+ enum spdk_bdev_io_status *status = cb_arg;
+
+ *status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
+ spdk_bdev_free_io(bdev_io);
+}
+
+static void
+aborted_reset(void)
+{
+ struct spdk_io_channel *io_ch[2];
+ enum spdk_bdev_io_status status1 = SPDK_BDEV_IO_STATUS_PENDING,
+ status2 = SPDK_BDEV_IO_STATUS_PENDING;
+
+ setup_test();
+
+ set_thread(0);
+ io_ch[0] = spdk_bdev_get_io_channel(g_desc);
+ CU_ASSERT(io_ch[0] != NULL);
+ spdk_bdev_reset(g_desc, io_ch[0], aborted_reset_done, &status1);
+ poll_threads();
+ CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);
+
+ /*
+ * First reset has been submitted on ch0. Now submit a second
+ * reset on ch1 which will get queued since there is already a
+ * reset in progress.
+ */
+ set_thread(1);
+ io_ch[1] = spdk_bdev_get_io_channel(g_desc);
+ CU_ASSERT(io_ch[1] != NULL);
+ spdk_bdev_reset(g_desc, io_ch[1], aborted_reset_done, &status2);
+ poll_threads();
+ CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);
+
+ /*
+ * Now destroy ch1. This will abort the queued reset. Check that
+ * the second reset was completed with failed status. Also check
+ * that bdev->internal.reset_in_progress != NULL, since the
+ * original reset has not been completed yet. This ensures that
+ * the bdev code is correctly noticing that the failed reset is
+ * *not* the one that had been submitted to the bdev module.
+ */
+ set_thread(1);
+ spdk_put_io_channel(io_ch[1]);
+ poll_threads();
+ CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_FAILED);
+ CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);
+
+ /*
+ * Now complete the first reset, verify that it completed with SUCCESS
+ * status and that bdev->internal.reset_in_progress is also set back to NULL.
+ */
+ set_thread(0);
+ spdk_put_io_channel(io_ch[0]);
+ stub_complete_io(g_bdev.io_target, 0);
+ poll_threads();
+ CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
+
+ teardown_test();
+}
+
+static void
+io_during_io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
+{
+ enum spdk_bdev_io_status *status = cb_arg;
+
+ *status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
+ spdk_bdev_free_io(bdev_io);
+}
+
+static void
+io_during_reset(void)
+{
+ struct spdk_io_channel *io_ch[2];
+ struct spdk_bdev_channel *bdev_ch[2];
+ enum spdk_bdev_io_status status0, status1, status_reset;
+ int rc;
+
+ setup_test();
+
+ /*
+ * First test normal case - submit an I/O on each of two channels (with no resets)
+ * and verify they complete successfully.
+ */
+ set_thread(0);
+ io_ch[0] = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
+ CU_ASSERT(bdev_ch[0]->flags == 0);
+ status0 = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
+ CU_ASSERT(rc == 0);
+
+ set_thread(1);
+ io_ch[1] = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
+ CU_ASSERT(bdev_ch[1]->flags == 0);
+ status1 = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
+ CU_ASSERT(rc == 0);
+
+ poll_threads();
+ CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
+ CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
+
+ set_thread(0);
+ stub_complete_io(g_bdev.io_target, 0);
+ CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ set_thread(1);
+ stub_complete_io(g_bdev.io_target, 0);
+ CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ /*
+ * Now submit a reset, and leave it pending while we submit I/O on two different
+ * channels. These I/O should be failed by the bdev layer since the reset is in
+ * progress.
+ */
+ set_thread(0);
+ status_reset = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &status_reset);
+ CU_ASSERT(rc == 0);
+
+ CU_ASSERT(bdev_ch[0]->flags == 0);
+ CU_ASSERT(bdev_ch[1]->flags == 0);
+ poll_threads();
+ CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_RESET_IN_PROGRESS);
+ CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_RESET_IN_PROGRESS);
+
+ set_thread(0);
+ status0 = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
+ CU_ASSERT(rc == 0);
+
+ set_thread(1);
+ status1 = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
+ CU_ASSERT(rc == 0);
+
+ /*
+ * A reset is in progress so these read I/O should complete with failure. Note that we
+ * need to poll_threads() since I/O that complete inline have their completions deferred.
+ */
+ poll_threads();
+ CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
+ CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_FAILED);
+ CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_FAILED);
+
+ /*
+ * Complete the reset
+ */
+ set_thread(0);
+ stub_complete_io(g_bdev.io_target, 0);
+
+ /*
+ * Only poll thread 0. We should not get a completion.
+ */
+ poll_thread(0);
+ CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
+
+ /*
+ * Poll both thread 0 and 1 so the messages can propagate and we
+ * get a completion.
+ */
+ poll_threads();
+ CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ spdk_put_io_channel(io_ch[0]);
+ set_thread(1);
+ spdk_put_io_channel(io_ch[1]);
+ poll_threads();
+
+ teardown_test();
+}
+
+static void
+basic_qos(void)
+{
+ struct spdk_io_channel *io_ch[2];
+ struct spdk_bdev_channel *bdev_ch[2];
+ struct spdk_bdev *bdev;
+ enum spdk_bdev_io_status status;
+ int rc;
+
+ setup_test();
+
+ /* Enable QoS */
+ bdev = &g_bdev.bdev;
+ bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
+ SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
+ TAILQ_INIT(&bdev->internal.qos->queued);
+ /*
+ * Enable both IOPS and bandwidth rate limits.
+ * In this case, both rate limits will take equal effect.
+ */
+ /* 2000 I/O per second, or 2 per millisecond */
+ bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT].limit = 2000;
+ /* 8K byte per millisecond with 4K block size */
+ bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT].limit = 8192000;
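+ /* 2000 IOPS x 4096-byte blocks = 8,192,000 bytes/sec, so both limits gate I/O at the same rate */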
+
+ g_get_io_channel = true;
+
+ set_thread(0);
+ io_ch[0] = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
+ CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
+
+ set_thread(1);
+ io_ch[1] = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
+ CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);
+
+ /*
+ * Send an I/O on thread 0, which is where the QoS thread is running.
+ */
+ set_thread(0);
+ status = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
+ poll_threads();
+ stub_complete_io(g_bdev.io_target, 0);
+ poll_threads();
+ CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ /* Send an I/O on thread 1. The QoS thread is not running here. */
+ status = SPDK_BDEV_IO_STATUS_PENDING;
+ set_thread(1);
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
+ poll_threads();
+ /* Complete I/O on thread 1. This should not complete the I/O we submitted */
+ stub_complete_io(g_bdev.io_target, 0);
+ poll_threads();
+ CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
+ /* Now complete I/O on thread 0 */
+ set_thread(0);
+ poll_threads();
+ stub_complete_io(g_bdev.io_target, 0);
+ poll_threads();
+ CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ /* Tear down the channels */
+ set_thread(0);
+ spdk_put_io_channel(io_ch[0]);
+ set_thread(1);
+ spdk_put_io_channel(io_ch[1]);
+ poll_threads();
+ set_thread(0);
+
+ /* Close the descriptor, which should stop the qos channel */
+ spdk_bdev_close(g_desc);
+ poll_threads();
+ CU_ASSERT(bdev->internal.qos->ch == NULL);
+
+ spdk_bdev_open(bdev, true, NULL, NULL, &g_desc);
+
+ /* Create the channels in reverse order. */
+ set_thread(1);
+ io_ch[1] = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
+ CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);
+
+ set_thread(0);
+ io_ch[0] = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
+ CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
+
+ /* Confirm that the qos thread is now thread 1 */
+ CU_ASSERT(bdev->internal.qos->ch == bdev_ch[1]);
+
+ /* Tear down the channels */
+ set_thread(0);
+ spdk_put_io_channel(io_ch[0]);
+ set_thread(1);
+ spdk_put_io_channel(io_ch[1]);
+ poll_threads();
+
+ set_thread(0);
+
+ teardown_test();
+}
+
+static void
+io_during_qos_queue(void)
+{
+ struct spdk_io_channel *io_ch[2];
+ struct spdk_bdev_channel *bdev_ch[2];
+ struct spdk_bdev *bdev;
+ enum spdk_bdev_io_status status0, status1;
+ int rc;
+
+ setup_test();
+ reset_time();
+
+ /* Enable QoS */
+ bdev = &g_bdev.bdev;
+ bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
+ SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
+ TAILQ_INIT(&bdev->internal.qos->queued);
+ /*
+ * Enable both IOPS and bandwidth rate limits.
+ * In this case, IOPS rate limit will take effect first.
+ */
+ /* 1000 I/O per second, or 1 per millisecond */
+ bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT].limit = 1000;
+ /* 8K byte per millisecond with 4K block size */
+ bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT].limit = 8192000;
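+ /* 1000 IOPS x 4096-byte blocks = 4,096,000 bytes/sec, below the 8,192,000 bytes/sec cap, so the IOPS limit binds first */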
+
+ g_get_io_channel = true;
+
+ /* Create channels */
+ set_thread(0);
+ io_ch[0] = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
+ CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
+
+ set_thread(1);
+ io_ch[1] = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
+ CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);
+
+ /* Send two I/O */
+ status1 = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
+ set_thread(0);
+ status0 = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
+
+ /* Complete any I/O that arrived at the disk */
+ poll_threads();
+ set_thread(1);
+ stub_complete_io(g_bdev.io_target, 0);
+ set_thread(0);
+ stub_complete_io(g_bdev.io_target, 0);
+ poll_threads();
+
+ /* Only one of the I/O should complete. (logical XOR) */
+ if (status0 == SPDK_BDEV_IO_STATUS_SUCCESS) {
+ CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
+ } else {
+ CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
+ }
+
+ /* Advance in time by a millisecond */
+ increment_time(1000);
+
+ /* Complete more I/O */
+ poll_threads();
+ set_thread(1);
+ stub_complete_io(g_bdev.io_target, 0);
+ set_thread(0);
+ stub_complete_io(g_bdev.io_target, 0);
+ poll_threads();
+
+ /* Now the second I/O should be done */
+ CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ /* Tear down the channels */
+ set_thread(1);
+ spdk_put_io_channel(io_ch[1]);
+ set_thread(0);
+ spdk_put_io_channel(io_ch[0]);
+ poll_threads();
+
+ teardown_test();
+}
+
+static void
+io_during_qos_reset(void)
+{
+ struct spdk_io_channel *io_ch[2];
+ struct spdk_bdev_channel *bdev_ch[2];
+ struct spdk_bdev *bdev;
+ enum spdk_bdev_io_status status0, status1, reset_status;
+ int rc;
+
+ setup_test();
+ reset_time();
+
+ /* Enable QoS */
+ bdev = &g_bdev.bdev;
+ bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
+ SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
+ TAILQ_INIT(&bdev->internal.qos->queued);
+ /*
+ * Enable both IOPS and bandwidth rate limits.
+ * In this case, bandwidth rate limit will take effect first.
+ */
+ /* 2000 I/O per second, or 2 per millisecond */
+ bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT].limit = 2000;
+ /* 4K byte per millisecond with 4K block size */
+ bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT].limit = 4096000;
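+ /* 4,096,000 bytes/sec over 4096-byte blocks allows only 1000 I/O per second, below the 2000 IOPS cap, so the bandwidth limit binds first */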
+
+ g_get_io_channel = true;
+
+ /* Create channels */
+ set_thread(0);
+ io_ch[0] = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
+ CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
+
+ set_thread(1);
+ io_ch[1] = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
+ CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);
+
+ /* Send two I/O. One of these gets queued by QoS. The other is sitting at the disk. */
+ status1 = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
+ CU_ASSERT(rc == 0);
+ set_thread(0);
+ status0 = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
+ CU_ASSERT(rc == 0);
+
+ poll_threads();
+ CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
+ CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
+
+ /* Reset the bdev. */
+ reset_status = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &reset_status);
+ CU_ASSERT(rc == 0);
+
+ /* Complete any I/O that arrived at the disk */
+ poll_threads();
+ set_thread(1);
+ stub_complete_io(g_bdev.io_target, 0);
+ set_thread(0);
+ stub_complete_io(g_bdev.io_target, 0);
+ poll_threads();
+
+ CU_ASSERT(reset_status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_FAILED);
+ CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_FAILED);
+
+ /* Tear down the channels */
+ set_thread(1);
+ spdk_put_io_channel(io_ch[1]);
+ set_thread(0);
+ spdk_put_io_channel(io_ch[0]);
+ poll_threads();
+
+ teardown_test();
+}
+
+static void
+enomem_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
+{
+ enum spdk_bdev_io_status *status = cb_arg;
+
+ *status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
+ spdk_bdev_free_io(bdev_io);
+}
+
+static void
+enomem(void)
+{
+ struct spdk_io_channel *io_ch;
+ struct spdk_bdev_channel *bdev_ch;
+ struct spdk_bdev_shared_resource *shared_resource;
+ struct ut_bdev_channel *ut_ch;
+ const uint32_t IO_ARRAY_SIZE = 64;
+ const uint32_t AVAIL = 20;
+ enum spdk_bdev_io_status status[IO_ARRAY_SIZE], status_reset;
+ uint32_t nomem_cnt, i;
+ struct spdk_bdev_io *first_io;
+ int rc;
+
+ setup_test();
+
+ set_thread(0);
+ io_ch = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch = spdk_io_channel_get_ctx(io_ch);
+ shared_resource = bdev_ch->shared_resource;
+ ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
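+ /* Shrink the stub channel's queue depth so ENOMEM conditions are easy to hit */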
+ ut_ch->avail_cnt = AVAIL;
+
+ /* First submit a number of IOs equal to what the channel can support. */
+ for (i = 0; i < AVAIL; i++) {
+ status[i] = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
+ CU_ASSERT(rc == 0);
+ }
+ CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));
+
+ /*
+ * Next, submit one additional I/O. This one should fail with ENOMEM and then go onto
+ * the nomem_io list.
+ */
+ status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));
+ first_io = TAILQ_FIRST(&shared_resource->nomem_io);
+
+ /*
+ * Now submit a bunch more I/O. These should all fail with ENOMEM and get queued behind
+ * the first_io above.
+ */
+ for (i = AVAIL + 1; i < IO_ARRAY_SIZE; i++) {
+ status[i] = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
+ CU_ASSERT(rc == 0);
+ }
+
+ /* Assert that first_io is still at the head of the list. */
+ CU_ASSERT(TAILQ_FIRST(&shared_resource->nomem_io) == first_io);
+ CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == (IO_ARRAY_SIZE - AVAIL));
+ nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io);
+ CU_ASSERT(shared_resource->nomem_threshold == (AVAIL - NOMEM_THRESHOLD_COUNT));
+
+ /*
+ * Complete 1 I/O only. The key check here is bdev_io_tailq_cnt - this should not have
+ * changed since completing just 1 I/O should not trigger retrying the queued nomem_io
+ * list.
+ */
+ stub_complete_io(g_bdev.io_target, 1);
+ CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt);
+
+ /*
+ * Complete enough I/O to hit the nomem_threshold. This should trigger retrying nomem_io,
+ * and we should see I/O get resubmitted to the test bdev module.
+ */
+ stub_complete_io(g_bdev.io_target, NOMEM_THRESHOLD_COUNT - 1);
+ CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) < nomem_cnt);
+ nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io);
+
+ /* Complete 1 I/O only. This should not trigger retrying the queued nomem_io. */
+ stub_complete_io(g_bdev.io_target, 1);
+ CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt);
+
+ /*
+ * Send a reset and confirm that all I/O are completed, including the ones that
+ * were queued on the nomem_io list.
+ */
+ status_reset = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_reset(g_desc, io_ch, enomem_done, &status_reset);
+ poll_threads();
+ CU_ASSERT(rc == 0);
+ /* This will complete the reset. */
+ stub_complete_io(g_bdev.io_target, 0);
+
+ CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == 0);
+ CU_ASSERT(shared_resource->io_outstanding == 0);
+
+ spdk_put_io_channel(io_ch);
+ poll_threads();
+ teardown_test();
+}
+
+static void
+enomem_multi_bdev(void)
+{
+ struct spdk_io_channel *io_ch;
+ struct spdk_bdev_channel *bdev_ch;
+ struct spdk_bdev_shared_resource *shared_resource;
+ struct ut_bdev_channel *ut_ch;
+ const uint32_t IO_ARRAY_SIZE = 64;
+ const uint32_t AVAIL = 20;
+ enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
+ uint32_t i;
+ struct ut_bdev *second_bdev;
+ struct spdk_bdev_desc *second_desc = NULL;
+ struct spdk_bdev_channel *second_bdev_ch;
+ struct spdk_io_channel *second_ch;
+ int rc;
+
+ setup_test();
+
+ /* Register second bdev with the same io_target */
+ second_bdev = calloc(1, sizeof(*second_bdev));
+ SPDK_CU_ASSERT_FATAL(second_bdev != NULL);
+ register_bdev(second_bdev, "ut_bdev2", g_bdev.io_target);
+ spdk_bdev_open(&second_bdev->bdev, true, NULL, NULL, &second_desc);
+ SPDK_CU_ASSERT_FATAL(second_desc != NULL);
+
+ set_thread(0);
+ io_ch = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch = spdk_io_channel_get_ctx(io_ch);
+ shared_resource = bdev_ch->shared_resource;
+ ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
+ ut_ch->avail_cnt = AVAIL;
+
+ second_ch = spdk_bdev_get_io_channel(second_desc);
+ second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
+ SPDK_CU_ASSERT_FATAL(shared_resource == second_bdev_ch->shared_resource);
+
+ /* Saturate io_target through bdev A. */
+ for (i = 0; i < AVAIL; i++) {
+ status[i] = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
+ CU_ASSERT(rc == 0);
+ }
+ CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));
+
+ /*
+ * Now submit I/O through the second bdev. This should fail with ENOMEM
+ * and then go onto the nomem_io list.
+ */
+ status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));
+
+ /* Complete first bdev's I/O. This should retry sending second bdev's nomem_io */
+ stub_complete_io(g_bdev.io_target, AVAIL);
+
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&shared_resource->nomem_io));
+ CU_ASSERT(shared_resource->io_outstanding == 1);
+
+ /* Now complete our retried I/O */
+ stub_complete_io(g_bdev.io_target, 1);
+ SPDK_CU_ASSERT_FATAL(shared_resource->io_outstanding == 0);
+
+ spdk_put_io_channel(io_ch);
+ spdk_put_io_channel(second_ch);
+ spdk_bdev_close(second_desc);
+ unregister_bdev(second_bdev);
+ poll_threads();
+ free(second_bdev);
+ teardown_test();
+}
+
+static void
+enomem_multi_io_target(void)
+{
+ struct spdk_io_channel *io_ch;
+ struct spdk_bdev_channel *bdev_ch;
+ struct ut_bdev_channel *ut_ch;
+ const uint32_t IO_ARRAY_SIZE = 64;
+ const uint32_t AVAIL = 20;
+ enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
+ uint32_t i;
+ int new_io_device;
+ struct ut_bdev *second_bdev;
+ struct spdk_bdev_desc *second_desc = NULL;
+ struct spdk_bdev_channel *second_bdev_ch;
+ struct spdk_io_channel *second_ch;
+ int rc;
+
+ setup_test();
+
+ /* Create new io_target and a second bdev using it */
+ spdk_io_device_register(&new_io_device, stub_create_ch, stub_destroy_ch,
+ sizeof(struct ut_bdev_channel), NULL);
+ second_bdev = calloc(1, sizeof(*second_bdev));
+ SPDK_CU_ASSERT_FATAL(second_bdev != NULL);
+ register_bdev(second_bdev, "ut_bdev2", &new_io_device);
+ spdk_bdev_open(&second_bdev->bdev, true, NULL, NULL, &second_desc);
+ SPDK_CU_ASSERT_FATAL(second_desc != NULL);
+
+ set_thread(0);
+ io_ch = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch = spdk_io_channel_get_ctx(io_ch);
+ ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
+ ut_ch->avail_cnt = AVAIL;
+
+ /* Different io_target should imply a different shared_resource */
+ second_ch = spdk_bdev_get_io_channel(second_desc);
+ second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
+ SPDK_CU_ASSERT_FATAL(bdev_ch->shared_resource != second_bdev_ch->shared_resource);
+
+ /* Saturate io_target through bdev A. */
+ for (i = 0; i < AVAIL; i++) {
+ status[i] = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
+ CU_ASSERT(rc == 0);
+ }
+ CU_ASSERT(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
+
+ /* Issue one more I/O; it should land on the nomem_io list. */
+ status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
+
+ /*
+ * Now submit I/O through the second bdev. This should go through and complete
+ * successfully because we're using a different io_device underneath.
+ */
+ status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&second_bdev_ch->shared_resource->nomem_io));
+ stub_complete_io(second_bdev->io_target, 1);
+
+ /* Cleanup: complete the outstanding I/O. */
+ stub_complete_io(g_bdev.io_target, AVAIL);
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
+ /* Complete the ENOMEM I/O */
+ stub_complete_io(g_bdev.io_target, 1);
+ CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0);
+
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
+ CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0);
+ spdk_put_io_channel(io_ch);
+ spdk_put_io_channel(second_ch);
+ spdk_bdev_close(second_desc);
+ unregister_bdev(second_bdev);
+ spdk_io_device_unregister(&new_io_device, NULL);
+ poll_threads();
+ free(second_bdev);
+ teardown_test();
+}
+
+static void
+qos_dynamic_enable_done(void *cb_arg, int status)
+{
+ int *rc = cb_arg;
+ *rc = status;
+}
+
+static void
+qos_dynamic_enable(void)
+{
+ struct spdk_io_channel *io_ch[2];
+ struct spdk_bdev_channel *bdev_ch[2];
+ struct spdk_bdev *bdev;
+ enum spdk_bdev_io_status bdev_io_status[2];
+ uint64_t limits[SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES] = {};
+ int status, second_status, rc, i;
+
+ setup_test();
+ reset_time();
+
+ for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
+ limits[i] = UINT64_MAX;
+ }
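+ /* UINT64_MAX means "leave this limit unchanged" when the limits are applied below */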
+
+ bdev = &g_bdev.bdev;
+
+ g_get_io_channel = true;
+
+ /* Create channels */
+ set_thread(0);
+ io_ch[0] = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
+ CU_ASSERT(bdev_ch[0]->flags == 0);
+
+ set_thread(1);
+ io_ch[1] = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
+ CU_ASSERT(bdev_ch[1]->flags == 0);
+
+ set_thread(0);
+
+ /*
+ * Enable QoS: IOPS and byte per second rate limits.
+ * More than 10 I/Os allowed per timeslice.
+ */
+ status = -1;
+ limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000;
+ limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 100;
+ spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
+ poll_threads();
+ CU_ASSERT(status == 0);
+ CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
+ CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
+
+ /*
+ * Submit and complete 10 I/O to fill the QoS allotment for this timeslice.
+ * Additional I/O will then be queued.
+ */
+ set_thread(0);
+ for (i = 0; i < 10; i++) {
+ bdev_io_status[0] = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &bdev_io_status[0]);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_PENDING);
+ poll_thread(0);
+ stub_complete_io(g_bdev.io_target, 0);
+ CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_SUCCESS);
+ }
+
+ /*
+ * Send two more I/O. These I/O will be queued since the current timeslice allotment has been
+ * filled already. We want to test that when QoS is disabled that these two I/O:
+ * 1) are not aborted
+ * 2) are sent back to their original thread for resubmission
+ */
+ bdev_io_status[0] = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &bdev_io_status[0]);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_PENDING);
+ set_thread(1);
+ bdev_io_status[1] = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &bdev_io_status[1]);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_PENDING);
+ poll_threads();
+
+ /* Disable QoS: IOPS rate limit */
+ status = -1;
+ limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 0;
+ spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
+ poll_threads();
+ CU_ASSERT(status == 0);
+ CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
+ CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
+
+ /* Disable QoS: Byte per second rate limit */
+ status = -1;
+ limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 0;
+ spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
+ poll_threads();
+ CU_ASSERT(status == 0);
+ CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
+ CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);
+
+ /*
+ * All I/O should have been resubmitted back on their original thread. Complete
+ * all I/O on thread 0, and ensure that only the thread 0 I/O was completed.
+ */
+ set_thread(0);
+ stub_complete_io(g_bdev.io_target, 0);
+ poll_threads();
+ CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_PENDING);
+
+ /* Now complete all I/O on thread 1 and ensure the thread 1 I/O was completed. */
+ set_thread(1);
+ stub_complete_io(g_bdev.io_target, 0);
+ poll_threads();
+ CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ /* Disable QoS again */
+ status = -1;
+ limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 0;
+ spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
+ poll_threads();
+ CU_ASSERT(status == 0); /* This should succeed */
+ CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
+ CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);
+
+ /* Enable QoS on thread 0 */
+ status = -1;
+ limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000;
+ spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
+ poll_threads();
+ CU_ASSERT(status == 0);
+ CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
+ CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
+
+ /* Disable QoS on thread 1 */
+ set_thread(1);
+ status = -1;
+ limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 0;
+ spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
+ /* Don't poll yet. This should leave the channels with QoS enabled */
+ CU_ASSERT(status == -1);
+ CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
+ CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
+
+ /* Enable QoS. This should immediately fail because the previous disable QoS hasn't completed. */
+ second_status = 0;
+ limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 10;
+ spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &second_status);
+ poll_threads();
+ CU_ASSERT(status == 0); /* The disable should succeed */
+ CU_ASSERT(second_status < 0); /* The enable should fail */
+ CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
+ CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);
+
+ /* Enable QoS on thread 1. This should succeed now that the disable has completed. */
+ status = -1;
+ limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000;
+ spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
+ poll_threads();
+ CU_ASSERT(status == 0);
+ CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
+ CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
+
+ /* Tear down the channels */
+ set_thread(0);
+ spdk_put_io_channel(io_ch[0]);
+ set_thread(1);
+ spdk_put_io_channel(io_ch[1]);
+ poll_threads();
+
+ set_thread(0);
+ teardown_test();
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("bdev", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "basic", basic) == NULL ||
+ CU_add_test(suite, "unregister_and_close", unregister_and_close) == NULL ||
+ CU_add_test(suite, "basic_qos", basic_qos) == NULL ||
+ CU_add_test(suite, "put_channel_during_reset", put_channel_during_reset) == NULL ||
+ CU_add_test(suite, "aborted_reset", aborted_reset) == NULL ||
+ CU_add_test(suite, "io_during_reset", io_during_reset) == NULL ||
+ CU_add_test(suite, "io_during_qos_queue", io_during_qos_queue) == NULL ||
+ CU_add_test(suite, "io_during_qos_reset", io_during_qos_reset) == NULL ||
+ CU_add_test(suite, "enomem", enomem) == NULL ||
+ CU_add_test(suite, "enomem_multi_bdev", enomem_multi_bdev) == NULL ||
+ CU_add_test(suite, "enomem_multi_io_target", enomem_multi_io_target) == NULL ||
+ CU_add_test(suite, "qos_dynamic_enable", qos_dynamic_enable) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/bdev/part.c/.gitignore b/src/spdk/test/unit/lib/bdev/part.c/.gitignore
new file mode 100644
index 00000000..c8302779
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/part.c/.gitignore
@@ -0,0 +1 @@
+part_ut
diff --git a/src/spdk/test/unit/lib/bdev/part.c/Makefile b/src/spdk/test/unit/lib/bdev/part.c/Makefile
new file mode 100644
index 00000000..9073c5cd
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/part.c/Makefile
@@ -0,0 +1,40 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+
+TEST_FILE = part_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/bdev/part.c/part_ut.c b/src/spdk/test/unit/lib/bdev/part.c/part_ut.c
new file mode 100644
index 00000000..fd251f4c
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/part.c/part_ut.c
@@ -0,0 +1,179 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "common/lib/test_env.c"
+#include "unit/lib/json_mock.c"
+
+#include "spdk/config.h"
+/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
+#undef SPDK_CONFIG_VTUNE
+
+#include "bdev/bdev.c"
+#include "bdev/part.c"
+
+DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *, (struct spdk_conf *cp,
+ const char *name), NULL);
+DEFINE_STUB(spdk_conf_section_get_nmval, char *,
+ (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL);
+DEFINE_STUB(spdk_conf_section_get_intval, int, (struct spdk_conf_section *sp, const char *key), -1);
+
+struct spdk_trace_histories *g_trace_histories;
+DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
+DEFINE_STUB_V(spdk_trace_register_owner, (uint8_t type, char id_prefix));
+DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
+DEFINE_STUB_V(spdk_trace_register_description, (const char *name, const char *short_name,
+ uint16_t tpoint_id, uint8_t owner_type,
+ uint8_t object_type, uint8_t new_object,
+ uint8_t arg1_is_ptr, const char *arg1_name));
+DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
+ uint32_t size, uint64_t object_id, uint64_t arg1));
+
+static void
+_part_send_msg(spdk_thread_fn fn, void *ctx, void *thread_ctx)
+{
+ fn(ctx);
+}
+
+static void
+_part_cleanup(struct spdk_bdev_part *part)
+{
+ free(part->internal.bdev.name);
+ free(part->internal.bdev.product_name);
+}
+
+void
+spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io,
+ int *sc, int *sk, int *asc, int *ascq)
+{
+}
+
+struct spdk_bdev_module bdev_ut_if = {
+ .name = "bdev_ut",
+};
+
+static void vbdev_ut_examine(struct spdk_bdev *bdev);
+
+struct spdk_bdev_module vbdev_ut_if = {
+ .name = "vbdev_ut",
+ .examine_config = vbdev_ut_examine,
+};
+
+SPDK_BDEV_MODULE_REGISTER(&bdev_ut_if)
+SPDK_BDEV_MODULE_REGISTER(&vbdev_ut_if)
+
+static void
+vbdev_ut_examine(struct spdk_bdev *bdev)
+{
+ spdk_bdev_module_examine_done(&vbdev_ut_if);
+}
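+
+/*
+ * Note (illustrative): an examine_config callback must always signal completion
+ * with spdk_bdev_module_examine_done(), otherwise bdev registration stalls
+ * waiting for the module. A hypothetical variant that also claims the bdev
+ * might look like the following sketch (spdk_bdev_module_claim_bdev() as in
+ * the SPDK bdev module API):
+ *
+ *	static void
+ *	vbdev_ut_examine_claiming(struct spdk_bdev *bdev)
+ *	{
+ *		if (spdk_bdev_module_claim_bdev(bdev, NULL, &vbdev_ut_if) == 0) {
+ *			// claimed - virtual bdevs would be built on top here
+ *		}
+ *		spdk_bdev_module_examine_done(&vbdev_ut_if);
+ *	}
+ */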
+
+static int
+__destruct(void *ctx)
+{
+ return 0;
+}
+
+static struct spdk_bdev_fn_table base_fn_table = {
+ .destruct = __destruct,
+};
+static struct spdk_bdev_fn_table part_fn_table = {
+ .destruct = __destruct,
+};
+
+static void
+part_test(void)
+{
+ struct spdk_bdev_part_base *base;
+ struct spdk_bdev_part part1 = {};
+ struct spdk_bdev_part part2 = {};
+ struct spdk_bdev bdev_base = {};
+ SPDK_BDEV_PART_TAILQ tailq = TAILQ_HEAD_INITIALIZER(tailq);
+ int rc;
+
+ bdev_base.name = "base";
+ bdev_base.fn_table = &base_fn_table;
+ bdev_base.module = &bdev_ut_if;
+ rc = spdk_bdev_register(&bdev_base);
+ CU_ASSERT(rc == 0);
+ base = spdk_bdev_part_base_construct(&bdev_base, NULL, &vbdev_ut_if,
+ &part_fn_table, &tailq, NULL,
+ NULL, 0, NULL, NULL);
+
+ SPDK_CU_ASSERT_FATAL(base != NULL);
+
+ rc = spdk_bdev_part_construct(&part1, base, "test1", 0, 100, "test");
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ rc = spdk_bdev_part_construct(&part2, base, "test2", 100, 100, "test");
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+
+ spdk_bdev_part_base_hotremove(&bdev_base, &tailq);
+
+ spdk_bdev_part_base_free(base);
+ _part_cleanup(&part1);
+ _part_cleanup(&part2);
+ spdk_bdev_unregister(&bdev_base, NULL, NULL);
+}
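+
+/*
+ * Geometry note (illustrative): spdk_bdev_part_construct() takes an offset and
+ * a length in blocks of the base bdev, so the calls above carve "base" into
+ * part1 = blocks [0, 100) and part2 = blocks [100, 200). An I/O issued to a
+ * part at block N is remapped by the part layer to block (offset + N) of the
+ * base bdev.
+ */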
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("bdev_part", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "part", part_test) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ spdk_allocate_thread(_part_send_msg, NULL, NULL, NULL, "thread0");
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ spdk_free_thread();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/bdev/pmem/.gitignore b/src/spdk/test/unit/lib/bdev/pmem/.gitignore
new file mode 100644
index 00000000..b2e0df1e
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/pmem/.gitignore
@@ -0,0 +1 @@
+bdev_pmem_ut
diff --git a/src/spdk/test/unit/lib/bdev/pmem/Makefile b/src/spdk/test/unit/lib/bdev/pmem/Makefile
new file mode 100644
index 00000000..9c0e7dc1
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/pmem/Makefile
@@ -0,0 +1,40 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+
+TEST_FILE = bdev_pmem_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/bdev/pmem/bdev_pmem_ut.c b/src/spdk/test/unit/lib/bdev/pmem/bdev_pmem_ut.c
new file mode 100644
index 00000000..742ec638
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/pmem/bdev_pmem_ut.c
@@ -0,0 +1,783 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "common/lib/test_env.c"
+#include "unit/lib/json_mock.c"
+
+#include "bdev/pmem/bdev_pmem.c"
+
+DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *,
+ (struct spdk_conf *cp, const char *name), NULL);
+DEFINE_STUB(spdk_conf_section_get_nval, char *,
+ (struct spdk_conf_section *sp, const char *key, int idx), NULL);
+DEFINE_STUB(spdk_conf_section_get_nmval, char *,
+ (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL);
+
+static struct spdk_bdev_module *g_bdev_pmem_module;
+static int g_bdev_module_cnt;
+
+struct pmemblk {
+ const char *name;
+ bool is_open;
+ bool is_consistent;
+ size_t bsize;
+ long long nblock;
+
+ uint8_t *buffer;
+};
+
+static const char *g_bdev_name = "pmem0";
+
+/* PMEMblkpool is a typedef of struct pmemblk */
+static PMEMblkpool g_pool_ok = {
+ .name = "/pools/ok_pool",
+ .is_open = false,
+ .is_consistent = true,
+ .bsize = 4096,
+ .nblock = 150
+};
+
+static PMEMblkpool g_pool_nblock_0 = {
+ .name = "/pools/nblock_0",
+ .is_open = false,
+ .is_consistent = true,
+ .bsize = 4096,
+ .nblock = 0
+};
+
+static PMEMblkpool g_pool_bsize_0 = {
+ .name = "/pools/nblock_0",
+ .is_open = false,
+ .is_consistent = true,
+ .bsize = 0,
+ .nblock = 100
+};
+
+static PMEMblkpool g_pool_inconsistent = {
+ .name = "/pools/inconsistent",
+ .is_open = false,
+ .is_consistent = false,
+ .bsize = 512,
+ .nblock = 1
+};
+
+static int g_opened_pools;
+static struct spdk_bdev *g_bdev;
+static const char *g_check_version_msg;
+static bool g_pmemblk_open_allow_open = true;
+
+static void
+_pmem_send_msg(spdk_thread_fn fn, void *ctx, void *thread_ctx)
+{
+ fn(ctx);
+}
+
+static PMEMblkpool *
+find_pmemblk_pool(const char *path)
+{
+ if (path == NULL) {
+ errno = EINVAL;
+ return NULL;
+ } else if (strcmp(g_pool_ok.name, path) == 0) {
+ return &g_pool_ok;
+ } else if (strcmp(g_pool_nblock_0.name, path) == 0) {
+ return &g_pool_nblock_0;
+ } else if (strcmp(g_pool_bsize_0.name, path) == 0) {
+ return &g_pool_bsize_0;
+ } else if (strcmp(g_pool_inconsistent.name, path) == 0) {
+ return &g_pool_inconsistent;
+ }
+
+ errno = ENOENT;
+ return NULL;
+}
+
+PMEMblkpool *
+pmemblk_open(const char *path, size_t bsize)
+{
+ PMEMblkpool *pool;
+
+ if (!g_pmemblk_open_allow_open) {
+ errno = EIO;
+ return NULL;
+ }
+
+ pool = find_pmemblk_pool(path);
+ if (!pool) {
+ errno = ENOENT;
+ return NULL;
+ }
+
+ CU_ASSERT_TRUE_FATAL(pool->is_consistent);
+ CU_ASSERT_FALSE(pool->is_open);
+ if (pool->is_open == false) {
+ pool->is_open = true;
+ g_opened_pools++;
+ } else {
+ errno = EBUSY;
+ pool = NULL;
+ }
+
+ return pool;
+}
+
+void
+spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
+{
+ cb(NULL, bdev_io);
+}
+
+static void
+check_open_pool_fatal(PMEMblkpool *pool)
+{
+ SPDK_CU_ASSERT_FATAL(pool != NULL);
+ SPDK_CU_ASSERT_FATAL(find_pmemblk_pool(pool->name) == pool);
+ SPDK_CU_ASSERT_FATAL(pool->is_open == true);
+}
+
+void
+pmemblk_close(PMEMblkpool *pool)
+{
+ check_open_pool_fatal(pool);
+ pool->is_open = false;
+ CU_ASSERT(g_opened_pools > 0);
+ g_opened_pools--;
+}
+
+size_t
+pmemblk_bsize(PMEMblkpool *pool)
+{
+ check_open_pool_fatal(pool);
+ return pool->bsize;
+}
+
+size_t
+pmemblk_nblock(PMEMblkpool *pool)
+{
+ check_open_pool_fatal(pool);
+ return pool->nblock;
+}
+
+int
+pmemblk_read(PMEMblkpool *pool, void *buf, long long blockno)
+{
+ check_open_pool_fatal(pool);
+ if (blockno >= pool->nblock) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ memcpy(buf, &pool->buffer[blockno * pool->bsize], pool->bsize);
+ return 0;
+}
+
+int
+pmemblk_write(PMEMblkpool *pool, const void *buf, long long blockno)
+{
+ check_open_pool_fatal(pool);
+ if (blockno >= pool->nblock) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ memcpy(&pool->buffer[blockno * pool->bsize], buf, pool->bsize);
+ return 0;
+}
+
+int
+pmemblk_set_zero(PMEMblkpool *pool, long long blockno)
+{
+ check_open_pool_fatal(pool);
+ if (blockno >= pool->nblock) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ memset(&pool->buffer[blockno * pool->bsize], 0, pool->bsize);
+ return 0;
+}
+
+const char *
+pmemblk_errormsg(void)
+{
+ return strerror(errno);
+}
+
+const char *
+pmemblk_check_version(unsigned major_required, unsigned minor_required)
+{
+ return g_check_version_msg;
+}
+
+int
+pmemblk_check(const char *path, size_t bsize)
+{
+ PMEMblkpool *pool = find_pmemblk_pool(path);
+
+ if (!pool) {
+ errno = ENOENT;
+ return -1;
+ }
+
+ if (!pool->is_consistent) {
+ /* errno ? */
+ return 0;
+ }
+
+ if (bsize != 0 && pool->bsize != bsize) {
+ /* errno ? */
+ return 0;
+ }
+
+ return 1;
+}
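+
+/*
+ * Note (illustrative): this stub follows the libpmemblk convention for
+ * pmemblk_check(): 1 means the pool is consistent, 0 means it is not (or the
+ * requested block size does not match), and -1 means the check itself failed
+ * with errno set. The driver under test is therefore expected to treat any
+ * return value other than 1 as an unusable pool.
+ */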
+
+void
+spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
+{
+ bdev_io->internal.status = status;
+}
+
+int
+spdk_bdev_register(struct spdk_bdev *bdev)
+{
+ CU_ASSERT_PTR_NULL(g_bdev);
+ g_bdev = bdev;
+
+ return 0;
+}
+
+void
+spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
+{
+}
+
+void
+spdk_bdev_module_finish_done(void)
+{
+}
+
+int
+spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
+{
+ bdev->blockcnt = size;
+ return 0;
+}
+
+static void
+ut_bdev_pmem_destruct(struct spdk_bdev *bdev)
+{
+ SPDK_CU_ASSERT_FATAL(g_bdev != NULL);
+ CU_ASSERT_EQUAL(bdev_pmem_destruct(bdev->ctxt), 0);
+ g_bdev = NULL;
+}
+
+void
+spdk_bdev_module_list_add(struct spdk_bdev_module *bdev_module)
+{
+ g_bdev_pmem_module = bdev_module;
+ g_bdev_module_cnt++;
+}
+
+static int
+bdev_submit_request(struct spdk_bdev *bdev, int16_t io_type, uint64_t offset_blocks,
+ uint64_t num_blocks, struct iovec *iovs, size_t iov_cnt)
+{
+ struct spdk_bdev_io bio = { 0 };
+
+	switch (io_type) {
+	case SPDK_BDEV_IO_TYPE_READ:
+	case SPDK_BDEV_IO_TYPE_WRITE:
+		bio.u.bdev.iovs = iovs;
+		bio.u.bdev.iovcnt = iov_cnt;
+	/* fall through - all data I/O types below carry an offset and length */
+	case SPDK_BDEV_IO_TYPE_FLUSH:
+	case SPDK_BDEV_IO_TYPE_UNMAP:
+	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
+		bio.u.bdev.offset_blocks = offset_blocks;
+		bio.u.bdev.num_blocks = num_blocks;
+		break;
+	case SPDK_BDEV_IO_TYPE_RESET:
+		break;
+	default:
+		CU_FAIL_FATAL("BUG:Unexpected IO type");
+		break;
+	}
+
+	/*
+	 * Seed the status with a sentinel value that the request path is
+	 * expected to overwrite.
+	 */
+ bio.type = io_type;
+ bio.internal.status = SPDK_BDEV_IO_STATUS_PENDING;
+ bio.bdev = bdev;
+ bdev_pmem_submit_request(NULL, &bio);
+ return bio.internal.status;
+}
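+
+/*
+ * Helper note (illustrative): because _pmem_send_msg() runs messages inline on
+ * the calling thread, bdev_pmem_submit_request() completes synchronously here
+ * and bio.internal.status is final as soon as the helper returns. A typical
+ * call from the tests below:
+ *
+ *	rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_READ, 0, 1, &iov[0], 1);
+ *	CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_SUCCESS);
+ */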
+
+static int
+ut_pmem_blk_clean(void)
+{
+ free(g_pool_ok.buffer);
+ g_pool_ok.buffer = NULL;
+
+ /* Unload module to free IO channel */
+ g_bdev_pmem_module->module_fini();
+
+ spdk_free_thread();
+
+ return 0;
+}
+
+static int
+ut_pmem_blk_init(void)
+{
+ errno = 0;
+
+ spdk_allocate_thread(_pmem_send_msg, NULL, NULL, NULL, NULL);
+
+ g_pool_ok.buffer = calloc(g_pool_ok.nblock, g_pool_ok.bsize);
+ if (g_pool_ok.buffer == NULL) {
+ ut_pmem_blk_clean();
+ return -1;
+ }
+
+ return 0;
+}
+
+static void
+ut_pmem_init(void)
+{
+ SPDK_CU_ASSERT_FATAL(g_bdev_pmem_module != NULL);
+ CU_ASSERT_EQUAL(g_bdev_module_cnt, 1);
+
+ /* Make pmemblk_check_version fail with provided error message */
+ g_check_version_msg = "TEST FAIL MESSAGE";
+ CU_ASSERT_NOT_EQUAL(g_bdev_pmem_module->module_init(), 0);
+
+	/* This init must succeed */
+ g_check_version_msg = NULL;
+ CU_ASSERT_EQUAL(g_bdev_pmem_module->module_init(), 0);
+}
+
+static void
+ut_pmem_open_close(void)
+{
+ struct spdk_bdev *bdev = NULL;
+ int pools_cnt;
+ int rc;
+
+ pools_cnt = g_opened_pools;
+
+ /* Try opening with NULL name */
+ rc = spdk_create_pmem_disk(NULL, NULL, &bdev);
+ CU_ASSERT_PTR_NULL(bdev);
+ CU_ASSERT_EQUAL(pools_cnt, g_opened_pools);
+ CU_ASSERT_NOT_EQUAL(rc, 0);
+
+ /* Open non-existent pool */
+ rc = spdk_create_pmem_disk("non existent pool", NULL, &bdev);
+ CU_ASSERT_PTR_NULL(bdev);
+ CU_ASSERT_EQUAL(pools_cnt, g_opened_pools);
+ CU_ASSERT_NOT_EQUAL(rc, 0);
+
+ /* Open inconsistent pool */
+ rc = spdk_create_pmem_disk(g_pool_inconsistent.name, NULL, &bdev);
+ CU_ASSERT_PTR_NULL(bdev);
+ CU_ASSERT_EQUAL(pools_cnt, g_opened_pools);
+ CU_ASSERT_NOT_EQUAL(rc, 0);
+
+	/* Make the pool open itself fail for an unspecified reason. */
+ g_pmemblk_open_allow_open = false;
+ rc = spdk_create_pmem_disk(g_pool_inconsistent.name, NULL, &bdev);
+ g_pmemblk_open_allow_open = true;
+ CU_ASSERT_PTR_NULL(bdev);
+ CU_ASSERT_EQUAL(pools_cnt, g_opened_pools);
+ CU_ASSERT_NOT_EQUAL(rc, 0);
+
+ /* Open pool with nblocks = 0 */
+ rc = spdk_create_pmem_disk(g_pool_nblock_0.name, NULL, &bdev);
+ CU_ASSERT_PTR_NULL(bdev);
+ CU_ASSERT_EQUAL(pools_cnt, g_opened_pools);
+ CU_ASSERT_NOT_EQUAL(rc, 0);
+
+ /* Open pool with bsize = 0 */
+ rc = spdk_create_pmem_disk(g_pool_bsize_0.name, NULL, &bdev);
+ CU_ASSERT_PTR_NULL(bdev);
+ CU_ASSERT_EQUAL(pools_cnt, g_opened_pools);
+ CU_ASSERT_NOT_EQUAL(rc, 0);
+
+	/* Open a good pool but pass a NULL bdev name */
+ rc = spdk_create_pmem_disk(g_pool_ok.name, NULL, &bdev);
+ CU_ASSERT_PTR_NULL(bdev);
+ CU_ASSERT_EQUAL(pools_cnt, g_opened_pools);
+ CU_ASSERT_NOT_EQUAL(rc, 0);
+
+ /* Open good pool */
+ rc = spdk_create_pmem_disk(g_pool_ok.name, g_bdev_name, &bdev);
+ SPDK_CU_ASSERT_FATAL(bdev != NULL);
+ CU_ASSERT_TRUE(g_pool_ok.is_open);
+ CU_ASSERT_EQUAL(pools_cnt + 1, g_opened_pools);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ /* Now remove this bdev */
+ ut_bdev_pmem_destruct(bdev);
+ CU_ASSERT_FALSE(g_pool_ok.is_open);
+ CU_ASSERT_EQUAL(pools_cnt, g_opened_pools);
+}
+
+static void
+ut_pmem_write_read(void)
+{
+ uint8_t *write_buf, *read_buf;
+ struct spdk_bdev *bdev;
+ int rc;
+ size_t unaligned_aligned_size = 100;
+ size_t buf_size = g_pool_ok.bsize * g_pool_ok.nblock;
+ size_t i;
+ const uint64_t nblock_offset = 10;
+ uint64_t offset;
+ size_t io_size, nblock, total_io_size, bsize;
+
+ bsize = 4096;
+ struct iovec iov[] = {
+ { 0, 2 * bsize },
+ { 0, 3 * bsize },
+ { 0, 4 * bsize },
+ };
+
+ rc = spdk_create_pmem_disk(g_pool_ok.name, g_bdev_name, &bdev);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ SPDK_CU_ASSERT_FATAL(g_pool_ok.nblock > 40);
+
+ write_buf = calloc(1, buf_size);
+ read_buf = calloc(1, buf_size);
+
+ SPDK_CU_ASSERT_FATAL(bdev != NULL);
+ SPDK_CU_ASSERT_FATAL(write_buf != NULL);
+ SPDK_CU_ASSERT_FATAL(read_buf != NULL);
+
+ total_io_size = 0;
+ offset = nblock_offset * g_pool_ok.bsize;
+ for (i = 0; i < 3; i++) {
+ iov[i].iov_base = &write_buf[offset + total_io_size];
+ total_io_size += iov[i].iov_len;
+ }
+
+ for (i = 0; i < total_io_size + unaligned_aligned_size; i++) {
+ write_buf[offset + i] = 0x42 + i;
+ }
+
+ SPDK_CU_ASSERT_FATAL(total_io_size < buf_size);
+
+ /*
+ * Write outside pool.
+ */
+ rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_WRITE, g_pool_ok.nblock, 1, &iov[0], 2);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_FAILED);
+
+ /*
+	 * Write with insufficient total IOV buffer length.
+ */
+ rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_WRITE, 0, g_pool_ok.nblock, &iov[0], 2);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_FAILED);
+
+ /*
+	 * Try to write two IOVs where the first one's iov_len % bsize != 0.
+ */
+ io_size = iov[0].iov_len + iov[1].iov_len;
+ nblock = io_size / g_pool_ok.bsize;
+ iov[0].iov_len += unaligned_aligned_size;
+ rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_WRITE, 0, nblock, &iov[0], 2);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_FAILED);
+ iov[0].iov_len -= unaligned_aligned_size;
+
+ /*
+ * Try to write one IOV.
+ */
+ nblock = iov[0].iov_len / g_pool_ok.bsize;
+ rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_WRITE, nblock_offset, nblock, &iov[0], 1);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_SUCCESS);
+
+	/*
+	 * Try to write 2 IOVs.
+	 * Their combined length exceeds the IO size: the last IOV is enlarged
+	 * so that its iov_len % bsize != 0.
+	 */
+ offset = iov[0].iov_len / g_pool_ok.bsize;
+ io_size = iov[1].iov_len + iov[2].iov_len;
+ nblock = io_size / g_pool_ok.bsize;
+ iov[2].iov_len += unaligned_aligned_size;
+ rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_WRITE, nblock_offset + offset, nblock,
+ &iov[1], 2);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_SUCCESS);
+ iov[2].iov_len -= unaligned_aligned_size;
+
+ /*
+ * Examine pool state:
+ * 1. Written area should have expected values.
+ * 2. Anything else should contain zeros.
+ */
+ offset = nblock_offset * g_pool_ok.bsize + total_io_size;
+ rc = memcmp(&g_pool_ok.buffer[0], write_buf, offset);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ for (i = offset; i < buf_size; i++) {
+ if (g_pool_ok.buffer[i] != 0) {
+ CU_ASSERT_EQUAL(g_pool_ok.buffer[i], 0);
+ break;
+ }
+ }
+
+ /* Setup IOV for reads */
+ memset(read_buf, 0xAB, buf_size);
+ offset = nblock_offset * g_pool_ok.bsize;
+ for (i = 0; i < 3; i++) {
+ iov[i].iov_base = &read_buf[offset];
+ offset += iov[i].iov_len;
+ }
+
+	/*
+	 * Read outside pool.
+	 */
+ rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_READ, g_pool_ok.nblock, 1, &iov[0], 2);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_FAILED);
+
+ /*
+	 * Read with insufficient total IOV buffer length.
+ */
+ rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_READ, 0, g_pool_ok.nblock, &iov[0], 2);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_FAILED);
+
+ /*
+	 * Try to read two IOVs where the first one's iov_len % bsize != 0.
+ */
+ io_size = iov[0].iov_len + iov[1].iov_len;
+ nblock = io_size / g_pool_ok.bsize;
+ iov[0].iov_len += unaligned_aligned_size;
+ rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_READ, 0, nblock, &iov[0], 2);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_FAILED);
+ iov[0].iov_len -= unaligned_aligned_size;
+
+	/*
+	 * Try to read one IOV.
+	 */
+ nblock = iov[0].iov_len / g_pool_ok.bsize;
+ rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_READ, nblock_offset, nblock, &iov[0], 1);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_SUCCESS);
+
+	/*
+	 * Try to read 2 IOVs.
+	 * Their combined length exceeds the IO size: the last IOV is enlarged
+	 * so that its iov_len % bsize != 0.
+	 */
+ offset = iov[0].iov_len / g_pool_ok.bsize;
+ io_size = iov[1].iov_len + iov[2].iov_len;
+ nblock = io_size / g_pool_ok.bsize;
+ iov[2].iov_len += unaligned_aligned_size;
+ rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_READ, nblock_offset + offset, nblock,
+ &iov[1], 2);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_SUCCESS);
+ iov[2].iov_len -= unaligned_aligned_size;
+
+	/*
+	 * Examine what was read:
+	 * 1. The read area should match what was written.
+	 * 2. Everything else should still hold the 0xAB fill pattern.
+	 */
+ offset = nblock_offset * g_pool_ok.bsize;
+ for (i = 0; i < offset; i++) {
+ if (read_buf[i] != 0xAB) {
+ CU_ASSERT_EQUAL(read_buf[i], 0xAB);
+ break;
+ }
+ }
+
+ rc = memcmp(&read_buf[offset], &write_buf[offset], total_io_size);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ offset += total_io_size;
+ for (i = offset; i < buf_size; i++) {
+ if (read_buf[i] != 0xAB) {
+ CU_ASSERT_EQUAL(read_buf[i], 0xAB);
+ break;
+ }
+ }
+
+ memset(g_pool_ok.buffer, 0, g_pool_ok.bsize * g_pool_ok.nblock);
+ free(write_buf);
+ free(read_buf);
+
+ /* Now remove this bdev */
+ ut_bdev_pmem_destruct(bdev);
+ CU_ASSERT_FALSE(g_pool_ok.is_open);
+ CU_ASSERT_EQUAL(g_opened_pools, 0);
+}
+
+static void
+ut_pmem_reset(void)
+{
+ struct spdk_bdev *bdev;
+ int rc;
+
+ rc = spdk_create_pmem_disk(g_pool_ok.name, g_bdev_name, &bdev);
+ CU_ASSERT_EQUAL(rc, 0);
+ SPDK_CU_ASSERT_FATAL(bdev != NULL);
+
+ rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_RESET, 0, 0, NULL, 0);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ ut_bdev_pmem_destruct(bdev);
+}
+
+static void
+ut_pmem_unmap_write_zero(int16_t io_type)
+{
+ struct spdk_bdev *bdev;
+ size_t buff_size = g_pool_ok.nblock * g_pool_ok.bsize;
+ size_t i;
+ uint8_t *buffer;
+ int rc;
+
+ CU_ASSERT(io_type == SPDK_BDEV_IO_TYPE_UNMAP || io_type == SPDK_BDEV_IO_TYPE_WRITE_ZEROES);
+ rc = spdk_create_pmem_disk(g_pool_ok.name, g_bdev_name, &bdev);
+ CU_ASSERT_EQUAL(rc, 0);
+ SPDK_CU_ASSERT_FATAL(bdev != NULL);
+ SPDK_CU_ASSERT_FATAL(g_pool_ok.nblock > 40);
+
+ buffer = calloc(1, buff_size);
+ SPDK_CU_ASSERT_FATAL(buffer != NULL);
+
+ for (i = 10 * g_pool_ok.bsize; i < 30 * g_pool_ok.bsize; i++) {
+ buffer[i] = 0x30 + io_type + i;
+ }
+ memcpy(g_pool_ok.buffer, buffer, buff_size);
+
+ /*
+ * Block outside of pool.
+ */
+ rc = bdev_submit_request(bdev, io_type, g_pool_ok.nblock, 1, NULL, 0);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_FAILED);
+
+ rc = memcmp(buffer, g_pool_ok.buffer, buff_size);
+ CU_ASSERT_EQUAL(rc, 0);
+
+	/*
+	 * Unmap or zero blocks 15 through 24 (10 blocks starting at block 15).
+	 */
+ memset(&buffer[15 * g_pool_ok.bsize], 0, 10 * g_pool_ok.bsize);
+ rc = bdev_submit_request(bdev, io_type, 15, 10, NULL, 0);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ rc = memcmp(buffer, g_pool_ok.buffer, buff_size);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ /*
+ * All blocks.
+ */
+ memset(buffer, 0, buff_size);
+ rc = bdev_submit_request(bdev, io_type, 0, g_pool_ok.nblock, NULL, 0);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ rc = memcmp(buffer, g_pool_ok.buffer, buff_size);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ /* Now remove this bdev */
+ ut_bdev_pmem_destruct(bdev);
+ CU_ASSERT_FALSE(g_pool_ok.is_open);
+ CU_ASSERT_EQUAL(g_opened_pools, 0);
+
+ free(buffer);
+}
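+
+/*
+ * Note (illustrative): UNMAP and WRITE_ZEROES are exercised by the same
+ * routine because the pmem bdev services both by zero-filling the affected
+ * blocks (mocked above by pmemblk_set_zero()), so the expected pool contents
+ * are identical for either I/O type.
+ */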
+
+static void
+ut_pmem_write_zero(void)
+{
+ ut_pmem_unmap_write_zero(SPDK_BDEV_IO_TYPE_WRITE_ZEROES);
+}
+
+static void
+ut_pmem_unmap(void)
+{
+ ut_pmem_unmap_write_zero(SPDK_BDEV_IO_TYPE_UNMAP);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("bdev_pmem", ut_pmem_blk_init, ut_pmem_blk_clean);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "ut_pmem_init", ut_pmem_init) == NULL ||
+ CU_add_test(suite, "ut_pmem_open_close", ut_pmem_open_close) == NULL ||
+ CU_add_test(suite, "ut_pmem_write_read", ut_pmem_write_read) == NULL ||
+ CU_add_test(suite, "ut_pmem_reset", ut_pmem_reset) == NULL ||
+ CU_add_test(suite, "ut_pmem_write_zero", ut_pmem_write_zero) == NULL ||
+ CU_add_test(suite, "ut_pmem_unmap", ut_pmem_unmap) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/bdev/scsi_nvme.c/.gitignore b/src/spdk/test/unit/lib/bdev/scsi_nvme.c/.gitignore
new file mode 100644
index 00000000..75800527
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/scsi_nvme.c/.gitignore
@@ -0,0 +1 @@
+scsi_nvme_ut
diff --git a/src/spdk/test/unit/lib/bdev/scsi_nvme.c/Makefile b/src/spdk/test/unit/lib/bdev/scsi_nvme.c/Makefile
new file mode 100644
index 00000000..0c908148
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/scsi_nvme.c/Makefile
@@ -0,0 +1,39 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) 2016 FUJITSU LIMITED, All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of the copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+
+TEST_FILE = scsi_nvme_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/bdev/scsi_nvme.c/scsi_nvme_ut.c b/src/spdk/test/unit/lib/bdev/scsi_nvme.c/scsi_nvme_ut.c
new file mode 100644
index 00000000..9b2eff35
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/scsi_nvme.c/scsi_nvme_ut.c
@@ -0,0 +1,142 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 FUJITSU LIMITED, All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "bdev/scsi_nvme.c"
+
+static int
+null_init(void)
+{
+ return 0;
+}
+
+static int
+null_clean(void)
+{
+ return 0;
+}
+
+static void
+scsi_nvme_translate_test(void)
+{
+ struct spdk_bdev_io bdev_io;
+ int sc, sk, asc, ascq;
+
+ /* SPDK_NVME_SCT_GENERIC */
+ bdev_io.internal.error.nvme.sct = SPDK_NVME_SCT_GENERIC;
+ bdev_io.internal.error.nvme.sc = SPDK_NVME_SC_ABORTED_POWER_LOSS;
+ spdk_scsi_nvme_translate(&bdev_io, &sc, &sk, &asc, &ascq);
+ CU_ASSERT_EQUAL(sc, SPDK_SCSI_STATUS_TASK_ABORTED);
+ CU_ASSERT_EQUAL(sk, SPDK_SCSI_SENSE_ABORTED_COMMAND);
+ CU_ASSERT_EQUAL(asc, SPDK_SCSI_ASC_WARNING);
+ CU_ASSERT_EQUAL(ascq, SPDK_SCSI_ASCQ_POWER_LOSS_EXPECTED);
+
+ bdev_io.internal.error.nvme.sc = SPDK_NVME_SC_INVALID_NUM_SGL_DESCIRPTORS;
+ spdk_scsi_nvme_translate(&bdev_io, &sc, &sk, &asc, &ascq);
+ CU_ASSERT_EQUAL(sc, SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT_EQUAL(sk, SPDK_SCSI_SENSE_ILLEGAL_REQUEST);
+ CU_ASSERT_EQUAL(asc, SPDK_SCSI_ASC_NO_ADDITIONAL_SENSE);
+ CU_ASSERT_EQUAL(ascq, SPDK_SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+
+ /* SPDK_NVME_SCT_COMMAND_SPECIFIC */
+ bdev_io.internal.error.nvme.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
+ bdev_io.internal.error.nvme.sc = SPDK_NVME_SC_INVALID_FORMAT;
+ spdk_scsi_nvme_translate(&bdev_io, &sc, &sk, &asc, &ascq);
+ CU_ASSERT_EQUAL(sc, SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT_EQUAL(sk, SPDK_SCSI_SENSE_ILLEGAL_REQUEST);
+ CU_ASSERT_EQUAL(asc, SPDK_SCSI_ASC_FORMAT_COMMAND_FAILED);
+ CU_ASSERT_EQUAL(ascq, SPDK_SCSI_ASCQ_FORMAT_COMMAND_FAILED);
+
+ bdev_io.internal.error.nvme.sc = SPDK_NVME_SC_OVERLAPPING_RANGE;
+ spdk_scsi_nvme_translate(&bdev_io, &sc, &sk, &asc, &ascq);
+ CU_ASSERT_EQUAL(sc, SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT_EQUAL(sk, SPDK_SCSI_SENSE_ILLEGAL_REQUEST);
+ CU_ASSERT_EQUAL(asc, SPDK_SCSI_ASC_NO_ADDITIONAL_SENSE);
+ CU_ASSERT_EQUAL(ascq, SPDK_SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+
+ /* SPDK_NVME_SCT_MEDIA_ERROR */
+ bdev_io.internal.error.nvme.sct = SPDK_NVME_SCT_MEDIA_ERROR;
+ bdev_io.internal.error.nvme.sc = SPDK_NVME_SC_GUARD_CHECK_ERROR;
+ spdk_scsi_nvme_translate(&bdev_io, &sc, &sk, &asc, &ascq);
+ CU_ASSERT_EQUAL(sc, SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT_EQUAL(sk, SPDK_SCSI_SENSE_MEDIUM_ERROR);
+ CU_ASSERT_EQUAL(asc, SPDK_SCSI_ASC_LOGICAL_BLOCK_GUARD_CHECK_FAILED);
+ CU_ASSERT_EQUAL(ascq, SPDK_SCSI_ASCQ_LOGICAL_BLOCK_GUARD_CHECK_FAILED);
+
+ bdev_io.internal.error.nvme.sc = SPDK_NVME_SC_DEALLOCATED_OR_UNWRITTEN_BLOCK;
+ spdk_scsi_nvme_translate(&bdev_io, &sc, &sk, &asc, &ascq);
+ CU_ASSERT_EQUAL(sc, SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT_EQUAL(sk, SPDK_SCSI_SENSE_ILLEGAL_REQUEST);
+ CU_ASSERT_EQUAL(asc, SPDK_SCSI_ASC_NO_ADDITIONAL_SENSE);
+ CU_ASSERT_EQUAL(ascq, SPDK_SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+
+ /* SPDK_NVME_SCT_VENDOR_SPECIFIC */
+ bdev_io.internal.error.nvme.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
+ bdev_io.internal.error.nvme.sc = 0xff;
+ spdk_scsi_nvme_translate(&bdev_io, &sc, &sk, &asc, &ascq);
+ CU_ASSERT_EQUAL(sc, SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT_EQUAL(sk, SPDK_SCSI_SENSE_ILLEGAL_REQUEST);
+ CU_ASSERT_EQUAL(asc, SPDK_SCSI_ASC_NO_ADDITIONAL_SENSE);
+ CU_ASSERT_EQUAL(ascq, SPDK_SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+}
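+
+/*
+ * Note (illustrative): the vendor-specific case above doubles as the default
+ * mapping: any NVMe status the translator does not explicitly handle is
+ * expected to collapse to the generic SCSI error tuple
+ *
+ *	sc   = SPDK_SCSI_STATUS_CHECK_CONDITION
+ *	sk   = SPDK_SCSI_SENSE_ILLEGAL_REQUEST
+ *	asc  = SPDK_SCSI_ASC_NO_ADDITIONAL_SENSE
+ *	ascq = SPDK_SCSI_ASCQ_CAUSE_NOT_REPORTABLE
+ *
+ * which is the same tuple the unrecognized generic and media-error codes map
+ * to earlier in this test.
+ */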
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("scsi_nvme_suite", null_init, null_clean);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "scsi_nvme - translate nvme error to scsi error",
+ scsi_nvme_translate_test) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/bdev/vbdev_lvol.c/.gitignore b/src/spdk/test/unit/lib/bdev/vbdev_lvol.c/.gitignore
new file mode 100644
index 00000000..5f2f6fdf
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/vbdev_lvol.c/.gitignore
@@ -0,0 +1 @@
+vbdev_lvol_ut
diff --git a/src/spdk/test/unit/lib/bdev/vbdev_lvol.c/Makefile b/src/spdk/test/unit/lib/bdev/vbdev_lvol.c/Makefile
new file mode 100644
index 00000000..c2e6b99e
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/vbdev_lvol.c/Makefile
@@ -0,0 +1,40 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+
+TEST_FILE = vbdev_lvol_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/bdev/vbdev_lvol.c/vbdev_lvol_ut.c b/src/spdk/test/unit/lib/bdev/vbdev_lvol.c/vbdev_lvol_ut.c
new file mode 100644
index 00000000..2500378b
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/vbdev_lvol.c/vbdev_lvol_ut.c
@@ -0,0 +1,1410 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+#include "spdk/string.h"
+
+#include "bdev/lvol/vbdev_lvol.c"
+
+#define SPDK_BS_PAGE_SIZE 0x1000
+
+int g_lvolerrno;
+int g_lvserrno;
+int g_cluster_size;
+int g_registered_bdevs;
+int g_num_lvols = 0;
+struct spdk_lvol_store *g_lvs = NULL;
+struct spdk_lvol *g_lvol = NULL;
+struct lvol_store_bdev *g_lvs_bdev = NULL;
+struct spdk_bdev *g_base_bdev = NULL;
+struct spdk_bdev_io *g_io = NULL;
+struct spdk_io_channel *g_ch = NULL;
+struct lvol_task *g_task = NULL;
+
+static struct spdk_bdev g_bdev = {};
+static struct spdk_lvol_store *g_lvol_store = NULL;
+bool lvol_store_initialize_fail = false;
+bool lvol_store_initialize_cb_fail = false;
+bool lvol_already_opened = false;
+bool g_examine_done = false;
+bool g_bdev_alias_already_exists = false;
+bool g_lvs_with_name_already_exists = false;
+bool g_lvol_deletable = true;
+
+int
+spdk_bdev_alias_add(struct spdk_bdev *bdev, const char *alias)
+{
+ struct spdk_bdev_alias *tmp;
+
+ CU_ASSERT(alias != NULL);
+ CU_ASSERT(bdev != NULL);
+ if (g_bdev_alias_already_exists) {
+ return -EEXIST;
+ }
+
+ tmp = calloc(1, sizeof(*tmp));
+ SPDK_CU_ASSERT_FATAL(tmp != NULL);
+
+ tmp->alias = strdup(alias);
+ SPDK_CU_ASSERT_FATAL(tmp->alias != NULL);
+
+ TAILQ_INSERT_TAIL(&bdev->aliases, tmp, tailq);
+
+ return 0;
+}
+
+int
+spdk_bdev_alias_del(struct spdk_bdev *bdev, const char *alias)
+{
+ struct spdk_bdev_alias *tmp;
+
+ CU_ASSERT(alias != NULL);
+ CU_ASSERT(bdev != NULL);
+
+ TAILQ_FOREACH(tmp, &bdev->aliases, tailq) {
+ if (strncmp(alias, tmp->alias, SPDK_LVOL_NAME_MAX) == 0) {
+ TAILQ_REMOVE(&bdev->aliases, tmp, tailq);
+ free(tmp->alias);
+ free(tmp);
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
+void
+spdk_bdev_alias_del_all(struct spdk_bdev *bdev)
+{
+ struct spdk_bdev_alias *p, *tmp;
+
+ TAILQ_FOREACH_SAFE(p, &bdev->aliases, tailq, tmp) {
+ TAILQ_REMOVE(&bdev->aliases, p, tailq);
+ free(p->alias);
+ free(p);
+ }
+}
+
+void
+spdk_bdev_destruct_done(struct spdk_bdev *bdev, int bdeverrno)
+{
+}
+
+void
+spdk_lvs_rename(struct spdk_lvol_store *lvs, const char *new_name,
+ spdk_lvs_op_complete cb_fn, void *cb_arg)
+{
+ if (g_lvs_with_name_already_exists) {
+ g_lvolerrno = -EEXIST;
+ } else {
+ snprintf(lvs->name, sizeof(lvs->name), "%s", new_name);
+ g_lvolerrno = 0;
+ }
+
+ cb_fn(cb_arg, g_lvolerrno);
+}
+
+void
+spdk_lvol_rename(struct spdk_lvol *lvol, const char *new_name,
+ spdk_lvol_op_complete cb_fn, void *cb_arg)
+{
+ struct spdk_lvol *tmp;
+
+ if (strncmp(lvol->name, new_name, SPDK_LVOL_NAME_MAX) == 0) {
+ cb_fn(cb_arg, 0);
+ return;
+ }
+
+ TAILQ_FOREACH(tmp, &lvol->lvol_store->lvols, link) {
+ if (strncmp(tmp->name, new_name, SPDK_LVOL_NAME_MAX) == 0) {
+ SPDK_ERRLOG("Lvol %s already exists in lvol store %s\n", new_name, lvol->lvol_store->name);
+ cb_fn(cb_arg, -EEXIST);
+ return;
+ }
+ }
+
+ snprintf(lvol->name, sizeof(lvol->name), "%s", new_name);
+
+ cb_fn(cb_arg, g_lvolerrno);
+}
+
+void
+spdk_lvol_open(struct spdk_lvol *lvol, spdk_lvol_op_with_handle_complete cb_fn, void *cb_arg)
+{
+ cb_fn(cb_arg, lvol, g_lvolerrno);
+}
+
+uint64_t
+spdk_blob_get_num_clusters(struct spdk_blob *b)
+{
+ return 0;
+}
+
+int
+spdk_blob_get_clones(struct spdk_blob_store *bs, spdk_blob_id blobid, spdk_blob_id *ids,
+ size_t *count)
+{
+ *count = 0;
+ return 0;
+}
+
+spdk_blob_id
+spdk_blob_get_parent_snapshot(struct spdk_blob_store *bs, spdk_blob_id blobid)
+{
+ return 0;
+}
+
+bool g_blob_is_read_only = false;
+
+bool
+spdk_blob_is_read_only(struct spdk_blob *blob)
+{
+ return g_blob_is_read_only;
+}
+
+bool
+spdk_blob_is_snapshot(struct spdk_blob *blob)
+{
+ return false;
+}
+
+bool
+spdk_blob_is_clone(struct spdk_blob *blob)
+{
+ return false;
+}
+
+bool
+spdk_blob_is_thin_provisioned(struct spdk_blob *blob)
+{
+ return false;
+}
+
+static struct spdk_lvol *_lvol_create(struct spdk_lvol_store *lvs);
+
+void
+spdk_lvs_load(struct spdk_bs_dev *dev,
+ spdk_lvs_op_with_handle_complete cb_fn, void *cb_arg)
+{
+ struct spdk_lvol_store *lvs = NULL;
+ int i;
+ int lvserrno = g_lvserrno;
+
+ if (lvserrno != 0) {
+		/* On error, the blobstore destroys the bs_dev itself by putting
+		 * back its io channels. That operation is asynchronous and
+		 * completes after the lvol callback has been invoked. */
+ cb_fn(cb_arg, g_lvol_store, lvserrno);
+ dev->destroy(dev);
+ return;
+ }
+
+ lvs = calloc(1, sizeof(*lvs));
+ SPDK_CU_ASSERT_FATAL(lvs != NULL);
+ TAILQ_INIT(&lvs->lvols);
+ TAILQ_INIT(&lvs->pending_lvols);
+ spdk_uuid_generate(&lvs->uuid);
+ lvs->bs_dev = dev;
+ for (i = 0; i < g_num_lvols; i++) {
+ _lvol_create(lvs);
+ }
+
+ cb_fn(cb_arg, lvs, lvserrno);
+}
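+
+/*
+ * Mock knobs (illustrative): g_num_lvols controls how many lvols this mocked
+ * load reports back, and g_lvserrno injects a load failure. Assuming the
+ * examine path in vbdev_lvol.c (vbdev_lvs_examine(), visible here because the
+ * .c file is included directly), a test wanting an lvolstore with two
+ * pre-existing lvols would do:
+ *
+ *	g_lvserrno = 0;
+ *	g_num_lvols = 2;
+ *	vbdev_lvs_examine(&g_bdev);
+ */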
+
+int
+spdk_bs_bdev_claim(struct spdk_bs_dev *bs_dev, struct spdk_bdev_module *module)
+{
+ if (lvol_already_opened == true) {
+ return -1;
+ }
+
+ lvol_already_opened = true;
+
+ return 0;
+}
+
+void
+spdk_bdev_unregister(struct spdk_bdev *vbdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
+{
+ int rc;
+
+ SPDK_CU_ASSERT_FATAL(vbdev != NULL);
+ rc = vbdev->fn_table->destruct(vbdev->ctxt);
+
+ SPDK_CU_ASSERT_FATAL(cb_fn != NULL);
+ cb_fn(cb_arg, rc);
+}
+
+void
+spdk_bdev_module_finish_done(void)
+{
+}
+
+uint64_t
+spdk_bs_get_page_size(struct spdk_blob_store *bs)
+{
+ return SPDK_BS_PAGE_SIZE;
+}
+
+uint64_t
+spdk_bs_get_io_unit_size(struct spdk_blob_store *bs)
+{
+ return SPDK_BS_PAGE_SIZE;
+}
+
+static void
+bdev_blob_destroy(struct spdk_bs_dev *bs_dev)
+{
+ CU_ASSERT(bs_dev != NULL);
+ free(bs_dev);
+ lvol_already_opened = false;
+}
+
+struct spdk_bs_dev *
+spdk_bdev_create_bs_dev(struct spdk_bdev *bdev, spdk_bdev_remove_cb_t remove_cb, void *remove_ctx)
+{
+ struct spdk_bs_dev *bs_dev;
+
+ if (lvol_already_opened == true || bdev == NULL) {
+ return NULL;
+ }
+
+ bs_dev = calloc(1, sizeof(*bs_dev));
+ SPDK_CU_ASSERT_FATAL(bs_dev != NULL);
+ bs_dev->destroy = bdev_blob_destroy;
+
+ return bs_dev;
+}
+
+void
+spdk_lvs_opts_init(struct spdk_lvs_opts *opts)
+{
+}
+
+int
+spdk_lvs_init(struct spdk_bs_dev *bs_dev, struct spdk_lvs_opts *o,
+ spdk_lvs_op_with_handle_complete cb_fn, void *cb_arg)
+{
+ struct spdk_lvol_store *lvs;
+ int error = 0;
+
+ if (lvol_store_initialize_fail) {
+ return -1;
+ }
+
+ if (lvol_store_initialize_cb_fail) {
+ bs_dev->destroy(bs_dev);
+ lvs = NULL;
+ error = -1;
+ } else {
+ lvs = calloc(1, sizeof(*lvs));
+ SPDK_CU_ASSERT_FATAL(lvs != NULL);
+ TAILQ_INIT(&lvs->lvols);
+ TAILQ_INIT(&lvs->pending_lvols);
+ spdk_uuid_generate(&lvs->uuid);
+ snprintf(lvs->name, sizeof(lvs->name), "%s", o->name);
+ lvs->bs_dev = bs_dev;
+ error = 0;
+ }
+ cb_fn(cb_arg, lvs, error);
+
+ return 0;
+}
+
+int
+spdk_lvs_unload(struct spdk_lvol_store *lvs, spdk_lvs_op_complete cb_fn, void *cb_arg)
+{
+ struct spdk_lvol *lvol, *tmp;
+
+ TAILQ_FOREACH_SAFE(lvol, &lvs->lvols, link, tmp) {
+ TAILQ_REMOVE(&lvs->lvols, lvol, link);
+ free(lvol->unique_id);
+ free(lvol);
+ }
+ g_lvol_store = NULL;
+
+ lvs->bs_dev->destroy(lvs->bs_dev);
+ free(lvs);
+
+ if (cb_fn != NULL) {
+ cb_fn(cb_arg, 0);
+ }
+
+ return 0;
+}
+
+int
+spdk_lvs_destroy(struct spdk_lvol_store *lvs, spdk_lvs_op_complete cb_fn,
+ void *cb_arg)
+{
+ struct spdk_lvol *lvol, *tmp;
+ char *alias;
+
+ TAILQ_FOREACH_SAFE(lvol, &lvs->lvols, link, tmp) {
+ TAILQ_REMOVE(&lvs->lvols, lvol, link);
+
+ alias = spdk_sprintf_alloc("%s/%s", lvs->name, lvol->name);
+ if (alias == NULL) {
+ SPDK_ERRLOG("Cannot alloc memory for alias\n");
+ return -1;
+ }
+ spdk_bdev_alias_del(lvol->bdev, alias);
+
+ free(alias);
+ free(lvol->unique_id);
+ free(lvol);
+ }
+ g_lvol_store = NULL;
+
+ lvs->bs_dev->destroy(lvs->bs_dev);
+ free(lvs);
+
+ if (cb_fn != NULL) {
+ cb_fn(cb_arg, 0);
+ }
+
+ return 0;
+}
+
+void
+spdk_lvol_resize(struct spdk_lvol *lvol, size_t sz, spdk_lvol_op_complete cb_fn, void *cb_arg)
+{
+ cb_fn(cb_arg, 0);
+}
+
+int
+spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
+{
+ bdev->blockcnt = size;
+ return 0;
+}
+
+uint64_t
+spdk_bs_get_cluster_size(struct spdk_blob_store *bs)
+{
+ return g_cluster_size;
+}
+
+struct spdk_bdev *
+spdk_bdev_get_by_name(const char *bdev_name)
+{
+ if (!strcmp(g_base_bdev->name, bdev_name)) {
+ return g_base_bdev;
+ }
+
+ return NULL;
+}
+
+void
+spdk_lvol_close(struct spdk_lvol *lvol, spdk_lvol_op_complete cb_fn, void *cb_arg)
+{
+ lvol->ref_count--;
+
+ SPDK_CU_ASSERT_FATAL(cb_fn != NULL);
+ cb_fn(cb_arg, 0);
+}
+
+bool
+spdk_lvol_deletable(struct spdk_lvol *lvol)
+{
+ return g_lvol_deletable;
+}
+
+void
+spdk_lvol_destroy(struct spdk_lvol *lvol, spdk_lvol_op_complete cb_fn, void *cb_arg)
+{
+	if (lvol->ref_count != 0) {
+		cb_fn(cb_arg, -ENODEV);
+		return;
+	}
+
+ TAILQ_REMOVE(&lvol->lvol_store->lvols, lvol, link);
+
+ SPDK_CU_ASSERT_FATAL(cb_fn != NULL);
+ cb_fn(cb_arg, 0);
+
+ g_lvol = NULL;
+ free(lvol->unique_id);
+ free(lvol);
+}
+
+void
+spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
+{
+}
+
+struct spdk_io_channel *
+spdk_lvol_get_io_channel(struct spdk_lvol *lvol)
+{
+ CU_ASSERT(lvol == g_lvol);
+ return g_ch;
+}
+
+void
+spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
+{
+ CU_ASSERT(cb == lvol_read);
+}
+
+void
+spdk_blob_io_read(struct spdk_blob *blob, struct spdk_io_channel *channel,
+ void *payload, uint64_t offset, uint64_t length,
+ spdk_blob_op_complete cb_fn, void *cb_arg)
+{
+}
+
+void
+spdk_blob_io_write(struct spdk_blob *blob, struct spdk_io_channel *channel,
+ void *payload, uint64_t offset, uint64_t length,
+ spdk_blob_op_complete cb_fn, void *cb_arg)
+{
+}
+
+void
+spdk_blob_io_unmap(struct spdk_blob *blob, struct spdk_io_channel *channel,
+ uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg)
+{
+ CU_ASSERT(blob == NULL);
+ CU_ASSERT(channel == g_ch);
+ CU_ASSERT(offset == g_io->u.bdev.offset_blocks);
+ CU_ASSERT(length == g_io->u.bdev.num_blocks);
+}
+
+void
+spdk_blob_io_write_zeroes(struct spdk_blob *blob, struct spdk_io_channel *channel,
+ uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg)
+{
+ CU_ASSERT(blob == NULL);
+ CU_ASSERT(channel == g_ch);
+ CU_ASSERT(offset == g_io->u.bdev.offset_blocks);
+ CU_ASSERT(length == g_io->u.bdev.num_blocks);
+}
+
+void
+spdk_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel,
+ struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
+ spdk_blob_op_complete cb_fn, void *cb_arg)
+{
+ CU_ASSERT(blob == NULL);
+ CU_ASSERT(channel == g_ch);
+ CU_ASSERT(offset == g_io->u.bdev.offset_blocks);
+ CU_ASSERT(length == g_io->u.bdev.num_blocks);
+}
+
+void
+spdk_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel,
+ struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
+ spdk_blob_op_complete cb_fn, void *cb_arg)
+{
+ CU_ASSERT(blob == NULL);
+ CU_ASSERT(channel == g_ch);
+ CU_ASSERT(offset == g_io->u.bdev.offset_blocks);
+ CU_ASSERT(length == g_io->u.bdev.num_blocks);
+}
+
+void
+spdk_bdev_module_list_add(struct spdk_bdev_module *bdev_module)
+{
+}
+
+int
+spdk_json_write_name(struct spdk_json_write_ctx *w, const char *name)
+{
+ return 0;
+}
+
+int
+spdk_json_write_array_begin(struct spdk_json_write_ctx *w)
+{
+ return 0;
+}
+
+int
+spdk_json_write_array_end(struct spdk_json_write_ctx *w)
+{
+ return 0;
+}
+
+int
+spdk_json_write_string(struct spdk_json_write_ctx *w, const char *val)
+{
+ return 0;
+}
+
+int
+spdk_json_write_bool(struct spdk_json_write_ctx *w, bool val)
+{
+ return 0;
+}
+
+int
+spdk_json_write_object_begin(struct spdk_json_write_ctx *w)
+{
+ return 0;
+}
+
+int
+spdk_json_write_object_end(struct spdk_json_write_ctx *w)
+{
+ return 0;
+}
+
+const char *
+spdk_bdev_get_name(const struct spdk_bdev *bdev)
+{
+ return "test";
+}
+
+int
+spdk_vbdev_register(struct spdk_bdev *vbdev, struct spdk_bdev **base_bdevs, int base_bdev_count)
+{
+ TAILQ_INIT(&vbdev->aliases);
+
+ g_registered_bdevs++;
+ return 0;
+}
+
+void
+spdk_bdev_module_examine_done(struct spdk_bdev_module *module)
+{
+ SPDK_CU_ASSERT_FATAL(g_examine_done != true);
+ g_examine_done = true;
+}
+
+static struct spdk_lvol *
+_lvol_create(struct spdk_lvol_store *lvs)
+{
+ struct spdk_lvol *lvol = calloc(1, sizeof(*lvol));
+
+ SPDK_CU_ASSERT_FATAL(lvol != NULL);
+
+ lvol->lvol_store = lvs;
+ lvol->ref_count++;
+ lvol->unique_id = spdk_sprintf_alloc("%s", "UNIT_TEST_UUID");
+ SPDK_CU_ASSERT_FATAL(lvol->unique_id != NULL);
+
+ TAILQ_INSERT_TAIL(&lvol->lvol_store->lvols, lvol, link);
+
+ return lvol;
+}
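+
+/*
+ * Note (illustrative): lvols minted by this helper start with ref_count == 1
+ * and a heap-allocated unique_id; the spdk_lvol_close() stub decrements the
+ * ref count, and the spdk_lvol_destroy()/spdk_lvs_unload() stubs free
+ * unique_id and the lvol itself, mirroring the ownership rules of the real
+ * lvolstore.
+ */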
+
+int
+spdk_lvol_create(struct spdk_lvol_store *lvs, const char *name, size_t sz,
+ bool thin_provision, spdk_lvol_op_with_handle_complete cb_fn, void *cb_arg)
+{
+ struct spdk_lvol *lvol;
+
+ lvol = _lvol_create(lvs);
+ snprintf(lvol->name, sizeof(lvol->name), "%s", name);
+ cb_fn(cb_arg, lvol, 0);
+
+ return 0;
+}
+
+void
+spdk_lvol_create_snapshot(struct spdk_lvol *lvol, const char *snapshot_name,
+ spdk_lvol_op_with_handle_complete cb_fn, void *cb_arg)
+{
+ struct spdk_lvol *snap;
+
+ snap = _lvol_create(lvol->lvol_store);
+ snprintf(snap->name, sizeof(snap->name), "%s", snapshot_name);
+ cb_fn(cb_arg, snap, 0);
+}
+
+void
+spdk_lvol_create_clone(struct spdk_lvol *lvol, const char *clone_name,
+ spdk_lvol_op_with_handle_complete cb_fn, void *cb_arg)
+{
+ struct spdk_lvol *clone;
+
+ clone = _lvol_create(lvol->lvol_store);
+ snprintf(clone->name, sizeof(clone->name), "%s", clone_name);
+ cb_fn(cb_arg, clone, 0);
+}
+
+static void
+lvol_store_op_complete(void *cb_arg, int lvserrno)
+{
+ g_lvserrno = lvserrno;
+}
+
+static void
+lvol_store_op_with_handle_complete(void *cb_arg, struct spdk_lvol_store *lvs, int lvserrno)
+{
+ g_lvserrno = lvserrno;
+ g_lvol_store = lvs;
+}
+
+static void
+vbdev_lvol_create_complete(void *cb_arg, struct spdk_lvol *lvol, int lvolerrno)
+{
+ g_lvolerrno = lvolerrno;
+ g_lvol = lvol;
+}
+
+static void
+vbdev_lvol_resize_complete(void *cb_arg, int lvolerrno)
+{
+ g_lvolerrno = lvolerrno;
+}
+
+static void
+vbdev_lvol_rename_complete(void *cb_arg, int lvolerrno)
+{
+ g_lvolerrno = lvolerrno;
+}
+
+static void
+ut_lvs_destroy(void)
+{
+ int rc = 0;
+ int sz = 10;
+ struct spdk_lvol_store *lvs;
+
+ /* Lvol store is successfully created */
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+
+ lvs = g_lvol_store;
+ g_lvol_store = NULL;
+
+ spdk_uuid_generate(&lvs->uuid);
+
+ /* Successfully create lvol, which should be unloaded with lvs later */
+ g_lvolerrno = -1;
+ rc = vbdev_lvol_create(lvs, "lvol", sz, false, vbdev_lvol_create_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvolerrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ /* Unload lvol store */
+ vbdev_lvs_destruct(lvs, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+}
+
+static void
+ut_lvol_init(void)
+{
+ struct spdk_lvol_store *lvs;
+ int sz = 10;
+ int rc;
+
+ /* Lvol store is successfully created */
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+ lvs = g_lvol_store;
+
+ /* Successful lvol create */
+ g_lvolerrno = -1;
+ rc = vbdev_lvol_create(lvs, "lvol", sz, false, vbdev_lvol_create_complete, NULL);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(g_lvol != NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+
+ /* Successful lvol destroy */
+ vbdev_lvol_destroy(g_lvol, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvol == NULL);
+
+ /* Destroy lvol store */
+ vbdev_lvs_destruct(lvs, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+}
+
+static void
+ut_lvol_snapshot(void)
+{
+ struct spdk_lvol_store *lvs;
+ int sz = 10;
+ int rc;
+ struct spdk_lvol *lvol = NULL;
+
+ /* Lvol store is successfully created */
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+ lvs = g_lvol_store;
+
+ /* Successful lvol create */
+ g_lvolerrno = -1;
+ rc = vbdev_lvol_create(lvs, "lvol", sz, false, vbdev_lvol_create_complete, NULL);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+
+ lvol = g_lvol;
+
+	/* Successful snap create; the completion callback leaves the snapshot in g_lvol */
+	g_lvolerrno = -1;
+	vbdev_lvol_create_snapshot(lvol, "snap", vbdev_lvol_create_complete, NULL);
+	SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+	CU_ASSERT(g_lvolerrno == 0);
+
+	/* Successful snap destroy */
+	vbdev_lvol_destroy(g_lvol, lvol_store_op_complete, NULL);
+	CU_ASSERT(g_lvol == NULL);
+
+	/* Successful lvol destroy */
+	g_lvol = lvol;
+ vbdev_lvol_destroy(g_lvol, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvol == NULL);
+
+ /* Destroy lvol store */
+ vbdev_lvs_destruct(lvs, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+}
+
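+/* Clone lifecycle: lvol -> snapshot -> clone, then destroy all three. */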
+static void
+ut_lvol_clone(void)
+{
+ struct spdk_lvol_store *lvs;
+ int sz = 10;
+ int rc;
+ struct spdk_lvol *lvol = NULL;
+ struct spdk_lvol *snap = NULL;
+ struct spdk_lvol *clone = NULL;
+
+ /* Lvol store is successfully created */
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+ lvs = g_lvol_store;
+
+ /* Successful lvol create */
+ g_lvolerrno = -1;
+ rc = vbdev_lvol_create(lvs, "lvol", sz, false, vbdev_lvol_create_complete, NULL);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+
+ lvol = g_lvol;
+
+	/* Successful snap create */
+	g_lvolerrno = -1;
+	vbdev_lvol_create_snapshot(lvol, "snap", vbdev_lvol_create_complete, NULL);
+	SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+	CU_ASSERT(g_lvolerrno == 0);
+
+	snap = g_lvol;
+
+	/* Successful clone create */
+	g_lvolerrno = -1;
+	vbdev_lvol_create_clone(snap, "clone", vbdev_lvol_create_complete, NULL);
+	SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+	CU_ASSERT(g_lvolerrno == 0);
+
+ clone = g_lvol;
+
+ /* Successful lvol destroy */
+ g_lvol = lvol;
+ vbdev_lvol_destroy(g_lvol, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvol == NULL);
+
+ /* Successful clone destroy */
+ g_lvol = clone;
+ vbdev_lvol_destroy(g_lvol, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvol == NULL);
+
+	/* Successful snap destroy */
+ g_lvol = snap;
+ vbdev_lvol_destroy(g_lvol, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvol == NULL);
+
+ /* Destroy lvol store */
+ vbdev_lvs_destruct(lvs, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+}
+
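+/*
+ * Simulate removal of the base bdev underneath a loaded lvol store and make
+ * sure the lvs/bdev pair list is emptied.
+ */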
+static void
+ut_lvol_hotremove(void)
+{
+ int rc = 0;
+
+ lvol_store_initialize_fail = false;
+ lvol_store_initialize_cb_fail = false;
+ lvol_already_opened = false;
+
+ /* Lvol store is successfully created */
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+
+ /* Hot remove callback with NULL - stability check */
+ vbdev_lvs_hotremove_cb(NULL);
+
+ /* Hot remove lvs on bdev removal */
+ vbdev_lvs_hotremove_cb(&g_bdev);
+
+ CU_ASSERT(g_lvol_store == NULL);
+ CU_ASSERT(TAILQ_EMPTY(&g_spdk_lvol_pairs));
+}
+
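+/*
+ * Helper for ut_lvol_examine(): checks the global state left behind by
+ * vbdev_lvs_examine() for either a successful or a failed examine.
+ */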
+static void
+ut_lvs_examine_check(bool success)
+{
+ struct lvol_store_bdev *lvs_bdev;
+
+ /* Examine was finished regardless of result */
+ CU_ASSERT(g_examine_done == true);
+ g_examine_done = false;
+
+ if (success) {
+ SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&g_spdk_lvol_pairs));
+ lvs_bdev = TAILQ_FIRST(&g_spdk_lvol_pairs);
+ SPDK_CU_ASSERT_FATAL(lvs_bdev != NULL);
+ g_lvol_store = lvs_bdev->lvs;
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+ } else {
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&g_spdk_lvol_pairs));
+ g_lvol_store = NULL;
+ }
+}
+
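+/*
+ * Examine coverage: bdev already claimed, lvol store load failure, a load
+ * where one lvol fails to open, and a fully successful examine that
+ * registers lvol bdevs.
+ */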
+static void
+ut_lvol_examine(void)
+{
+ /* Examine unsuccessfully - bdev already opened */
+ g_lvserrno = -1;
+ lvol_already_opened = true;
+ vbdev_lvs_examine(&g_bdev);
+ ut_lvs_examine_check(false);
+
+ /* Examine unsuccessfully - fail on lvol store */
+ g_lvserrno = -1;
+ lvol_already_opened = false;
+ vbdev_lvs_examine(&g_bdev);
+ ut_lvs_examine_check(false);
+
+ /* Examine successfully
+ * - one lvol fails to load
+ * - lvs is loaded with no lvols present */
+ g_lvserrno = 0;
+ g_lvolerrno = -1;
+ g_num_lvols = 1;
+ lvol_already_opened = false;
+ g_registered_bdevs = 0;
+ vbdev_lvs_examine(&g_bdev);
+ ut_lvs_examine_check(true);
+ CU_ASSERT(g_registered_bdevs == 0);
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_store->lvols));
+ vbdev_lvs_destruct(g_lvol_store, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+
+ /* Examine successfully */
+ g_lvserrno = 0;
+ g_lvolerrno = 0;
+ g_registered_bdevs = 0;
+ lvol_already_opened = false;
+ vbdev_lvs_examine(&g_bdev);
+ ut_lvs_examine_check(true);
+ CU_ASSERT(g_registered_bdevs != 0);
+ SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&g_lvol_store->lvols));
+ vbdev_lvs_destruct(g_lvol_store, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+}
+
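+/*
+ * Rename coverage: a plain rename, a rename colliding with an existing bdev
+ * alias, and a rename to the lvol's current name.
+ */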
+static void
+ut_lvol_rename(void)
+{
+ struct spdk_lvol_store *lvs;
+ struct spdk_lvol *lvol;
+ struct spdk_lvol *lvol2;
+ int sz = 10;
+ int rc;
+
+ /* Lvol store is successfully created */
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+ lvs = g_lvol_store;
+
+ /* Successful lvols create */
+ g_lvolerrno = -1;
+ rc = vbdev_lvol_create(lvs, "lvol", sz, false, vbdev_lvol_create_complete, NULL);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(g_lvol != NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+ lvol = g_lvol;
+
+ g_lvolerrno = -1;
+ rc = vbdev_lvol_create(lvs, "lvol2", sz, false, vbdev_lvol_create_complete, NULL);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(g_lvol != NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+ lvol2 = g_lvol;
+
+ /* Successful rename lvol */
+ vbdev_lvol_rename(lvol, "new_lvol_name", vbdev_lvol_rename_complete, NULL);
+ SPDK_CU_ASSERT_FATAL(g_lvolerrno == 0);
+ CU_ASSERT_STRING_EQUAL(lvol->name, "new_lvol_name");
+
+ /* Renaming lvol with name already existing */
+ g_bdev_alias_already_exists = true;
+ vbdev_lvol_rename(lvol2, "new_lvol_name", vbdev_lvol_rename_complete, NULL);
+ g_bdev_alias_already_exists = false;
+ SPDK_CU_ASSERT_FATAL(g_lvolerrno != 0);
+ CU_ASSERT_STRING_NOT_EQUAL(lvol2->name, "new_lvol_name");
+
+	/* Renaming lvol with its own name */
+ vbdev_lvol_rename(lvol, "new_lvol_name", vbdev_lvol_rename_complete, NULL);
+ SPDK_CU_ASSERT_FATAL(g_lvolerrno == 0);
+ CU_ASSERT_STRING_EQUAL(lvol->name, "new_lvol_name");
+
+ /* Successful lvols destroy */
+ vbdev_lvol_destroy(lvol, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvol == NULL);
+
+ vbdev_lvol_destroy(lvol2, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvol == NULL);
+
+ /* Destroy lvol store */
+ vbdev_lvs_destruct(lvs, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+}
+
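+/*
+ * Destroy coverage: destruction refused while the lvol is marked not
+ * deletable (-EPERM), a successful destroy, and hot removal of a remaining
+ * lvol bdev before the store is unloaded.
+ */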
+static void
+ut_lvol_destroy(void)
+{
+ struct spdk_lvol_store *lvs;
+ struct spdk_lvol *lvol;
+ struct spdk_lvol *lvol2;
+ int sz = 10;
+ int rc;
+
+ /* Lvol store is successfully created */
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+ lvs = g_lvol_store;
+
+ /* Successful lvols create */
+ g_lvolerrno = -1;
+ rc = vbdev_lvol_create(lvs, "lvol", sz, false, vbdev_lvol_create_complete, NULL);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(g_lvol != NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+ lvol = g_lvol;
+
+ g_lvolerrno = -1;
+ rc = vbdev_lvol_create(lvs, "lvol2", sz, false, vbdev_lvol_create_complete, NULL);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(g_lvol != NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+ lvol2 = g_lvol;
+
+	/* Unsuccessful lvol destroy */
+ g_lvol_deletable = false;
+ vbdev_lvol_destroy(lvol, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvol != NULL);
+ CU_ASSERT(g_lvserrno == -EPERM);
+
+	/* Successful lvol destroy */
+	g_lvol_deletable = true;
+ vbdev_lvol_destroy(lvol, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvol == NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+
+ /* Hot remove lvol bdev */
+ vbdev_lvol_unregister(lvol2);
+
+ /* Unload lvol store */
+ vbdev_lvs_unload(lvs, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+}
+
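+/*
+ * Resize coverage: a successful grow, where the bdev block count must track
+ * the new cluster count, and a resize attempt on a NULL lvol.
+ */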
+static void
+ut_lvol_resize(void)
+{
+ struct spdk_lvol_store *lvs;
+ struct spdk_lvol *lvol;
+ int sz = 10;
+ int rc = 0;
+
+ /* Lvol store is successfully created */
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+ lvs = g_lvol_store;
+
+ /* Successful lvol create */
+ g_lvolerrno = -1;
+ rc = vbdev_lvol_create(lvs, "lvol", sz, false, vbdev_lvol_create_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvolerrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ lvol = g_lvol;
+
+ /* Successful lvol resize */
+ g_lvolerrno = -1;
+ vbdev_lvol_resize(lvol, 20, vbdev_lvol_resize_complete, NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+ CU_ASSERT(lvol->bdev->blockcnt == 20 * g_cluster_size / lvol->bdev->blocklen);
+
+ /* Resize with NULL lvol */
+ vbdev_lvol_resize(NULL, 20, vbdev_lvol_resize_complete, NULL);
+ CU_ASSERT(g_lvolerrno != 0);
+
+ /* Successful lvol destroy */
+ vbdev_lvol_destroy(lvol, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvol == NULL);
+
+ /* Destroy lvol store */
+ vbdev_lvs_destruct(lvs, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+}
+
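+/*
+ * Unlike destruct, unload must leave the lvols themselves intact: g_lvol is
+ * still valid after the store goes away.
+ */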
+static void
+ut_lvs_unload(void)
+{
+ int rc = 0;
+ int sz = 10;
+ struct spdk_lvol_store *lvs;
+
+ /* Lvol store is successfully created */
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+
+ lvs = g_lvol_store;
+ g_lvol_store = NULL;
+
+ spdk_uuid_generate(&lvs->uuid);
+
+ /* Successfully create lvol, which should be destroyed with lvs later */
+ g_lvolerrno = -1;
+ rc = vbdev_lvol_create(lvs, "lvol", sz, false, vbdev_lvol_create_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvolerrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ /* Unload lvol store */
+ vbdev_lvs_unload(lvs, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+ CU_ASSERT(g_lvol != NULL);
+}
+
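+/*
+ * Init coverage: spdk_lvs_init() failing outright, failure reported through
+ * the init callback, a successful create, and a second create on an already
+ * claimed bdev.
+ */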
+static void
+ut_lvs_init(void)
+{
+ int rc = 0;
+ struct spdk_lvol_store *lvs;
+
+ /* spdk_lvs_init() fails */
+ lvol_store_initialize_fail = true;
+
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc != 0);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+
+ lvol_store_initialize_fail = false;
+
+ /* spdk_lvs_init_cb() fails */
+ lvol_store_initialize_cb_fail = true;
+
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno != 0);
+ CU_ASSERT(g_lvol_store == NULL);
+
+ lvol_store_initialize_cb_fail = false;
+
+ /* Lvol store is successfully created */
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+
+ lvs = g_lvol_store;
+ g_lvol_store = NULL;
+
+ /* Bdev with lvol store already claimed */
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc != 0);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+
+ /* Destruct lvol store */
+ vbdev_lvs_destruct(lvs, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+}
+
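+/*
+ * The channel returned for an lvol must be the stubbed blobstore channel,
+ * represented here by the global g_ch.
+ */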
+static void
+ut_vbdev_lvol_get_io_channel(void)
+{
+ struct spdk_io_channel *ch;
+
+ g_lvol = calloc(1, sizeof(struct spdk_lvol));
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ ch = vbdev_lvol_get_io_channel(g_lvol);
+ CU_ASSERT(ch == g_ch);
+
+ free(g_lvol);
+}
+
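+/*
+ * I/O type support depends on whether the underlying blob is read-only:
+ * write, unmap and write-zeroes must be rejected on read-only blobs.
+ */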
+static void
+ut_vbdev_lvol_io_type_supported(void)
+{
+ struct spdk_lvol *lvol;
+ bool ret;
+
+ lvol = calloc(1, sizeof(struct spdk_lvol));
+ SPDK_CU_ASSERT_FATAL(lvol != NULL);
+
+ g_blob_is_read_only = false;
+
+ /* Supported types */
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_READ);
+ CU_ASSERT(ret == true);
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_WRITE);
+ CU_ASSERT(ret == true);
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_RESET);
+ CU_ASSERT(ret == true);
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_UNMAP);
+ CU_ASSERT(ret == true);
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_WRITE_ZEROES);
+ CU_ASSERT(ret == true);
+
+ /* Unsupported types */
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_FLUSH);
+ CU_ASSERT(ret == false);
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_NVME_ADMIN);
+ CU_ASSERT(ret == false);
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_NVME_IO);
+ CU_ASSERT(ret == false);
+
+ g_blob_is_read_only = true;
+
+ /* Supported types */
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_READ);
+ CU_ASSERT(ret == true);
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_RESET);
+ CU_ASSERT(ret == true);
+
+ /* Unsupported types */
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_WRITE);
+ CU_ASSERT(ret == false);
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_UNMAP);
+ CU_ASSERT(ret == false);
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_WRITE_ZEROES);
+ CU_ASSERT(ret == false);
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_FLUSH);
+ CU_ASSERT(ret == false);
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_NVME_ADMIN);
+ CU_ASSERT(ret == false);
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_NVME_IO);
+ CU_ASSERT(ret == false);
+
+ free(lvol);
+}
+
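+/*
+ * Exercise the read and write paths with a hand-built spdk_bdev_io. The
+ * stubbed blobstore I/O completes inline, so the task status can be checked
+ * right after each call.
+ */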
+static void
+ut_lvol_read_write(void)
+{
+ g_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct lvol_task));
+ SPDK_CU_ASSERT_FATAL(g_io != NULL);
+ g_base_bdev = calloc(1, sizeof(struct spdk_bdev));
+ SPDK_CU_ASSERT_FATAL(g_base_bdev != NULL);
+ g_lvol = calloc(1, sizeof(struct spdk_lvol));
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ g_task = (struct lvol_task *)g_io->driver_ctx;
+ g_io->bdev = g_base_bdev;
+ g_io->bdev->ctxt = g_lvol;
+ g_io->u.bdev.offset_blocks = 20;
+ g_io->u.bdev.num_blocks = 20;
+
+ lvol_read(g_ch, g_io);
+ CU_ASSERT(g_task->status == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ lvol_write(g_lvol, g_ch, g_io);
+ CU_ASSERT(g_task->status == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ free(g_io);
+ free(g_base_bdev);
+ free(g_lvol);
+}
+
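+/* Smoke test: route a READ bdev_io through vbdev_lvol_submit_request(). */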
+static void
+ut_vbdev_lvol_submit_request(void)
+{
+	struct spdk_lvol request_lvol = {};
+
+ g_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct lvol_task));
+ SPDK_CU_ASSERT_FATAL(g_io != NULL);
+ g_base_bdev = calloc(1, sizeof(struct spdk_bdev));
+ SPDK_CU_ASSERT_FATAL(g_base_bdev != NULL);
+ g_task = (struct lvol_task *)g_io->driver_ctx;
+ g_io->bdev = g_base_bdev;
+
+ g_io->type = SPDK_BDEV_IO_TYPE_READ;
+ g_base_bdev->ctxt = &request_lvol;
+ vbdev_lvol_submit_request(g_ch, g_io);
+
+ free(g_io);
+ free(g_base_bdev);
+}
+
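+/*
+ * Rename an lvol store and verify that lvol bdev aliases are rewritten to
+ * the new 'lvs_name/lvol_name' form; a name collision must leave both the
+ * lvs name and the aliases unchanged.
+ */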
+static void
+ut_lvs_rename(void)
+{
+ int rc = 0;
+ int sz = 10;
+ struct spdk_lvol_store *lvs;
+
+ /* Lvol store is successfully created */
+ rc = vbdev_lvs_create(&g_bdev, "old_lvs_name", 0, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+
+ lvs = g_lvol_store;
+ g_lvol_store = NULL;
+
+ g_base_bdev = calloc(1, sizeof(*g_base_bdev));
+ SPDK_CU_ASSERT_FATAL(g_base_bdev != NULL);
+
+ /* Successfully create lvol, which should be destroyed with lvs later */
+ g_lvolerrno = -1;
+ rc = vbdev_lvol_create(lvs, "lvol", sz, false, vbdev_lvol_create_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvolerrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ /* Trying to rename lvs with lvols created */
+ vbdev_lvs_rename(lvs, "new_lvs_name", lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT_STRING_EQUAL(lvs->name, "new_lvs_name");
+ CU_ASSERT_STRING_EQUAL(TAILQ_FIRST(&g_lvol->bdev->aliases)->alias, "new_lvs_name/lvol");
+
+	/* Trying to rename lvs to a name already used by another lvs */
+	/* This is a bdev_lvol test, so g_lvs_with_name_already_exists simulates an
+	 * existing lvs named 'another_new_lvs_name'; the name itself is never compared */
+ g_lvs_with_name_already_exists = true;
+ vbdev_lvs_rename(lvs, "another_new_lvs_name", lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == -EEXIST);
+ CU_ASSERT_STRING_EQUAL(lvs->name, "new_lvs_name");
+ CU_ASSERT_STRING_EQUAL(TAILQ_FIRST(&g_lvol->bdev->aliases)->alias, "new_lvs_name/lvol");
+ g_lvs_with_name_already_exists = false;
+
+ /* Unload lvol store */
+ g_lvol_store = lvs;
+ vbdev_lvs_destruct(g_lvol_store, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+
+ free(g_base_bdev->name);
+ free(g_base_bdev);
+}
+
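+/* Register the suite with CUnit, run it, and report the failure count. */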
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("lvol", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "ut_lvs_init", ut_lvs_init) == NULL ||
+ CU_add_test(suite, "ut_lvol_init", ut_lvol_init) == NULL ||
+ CU_add_test(suite, "ut_lvol_snapshot", ut_lvol_snapshot) == NULL ||
+ CU_add_test(suite, "ut_lvol_clone", ut_lvol_clone) == NULL ||
+ CU_add_test(suite, "ut_lvs_destroy", ut_lvs_destroy) == NULL ||
+ CU_add_test(suite, "ut_lvs_unload", ut_lvs_unload) == NULL ||
+ CU_add_test(suite, "ut_lvol_resize", ut_lvol_resize) == NULL ||
+		CU_add_test(suite, "ut_lvol_hotremove", ut_lvol_hotremove) == NULL ||
+ CU_add_test(suite, "ut_vbdev_lvol_get_io_channel", ut_vbdev_lvol_get_io_channel) == NULL ||
+ CU_add_test(suite, "ut_vbdev_lvol_io_type_supported", ut_vbdev_lvol_io_type_supported) == NULL ||
+ CU_add_test(suite, "ut_lvol_read_write", ut_lvol_read_write) == NULL ||
+ CU_add_test(suite, "ut_vbdev_lvol_submit_request", ut_vbdev_lvol_submit_request) == NULL ||
+		CU_add_test(suite, "ut_lvol_examine", ut_lvol_examine) == NULL ||
+ CU_add_test(suite, "ut_lvol_rename", ut_lvol_rename) == NULL ||
+ CU_add_test(suite, "ut_lvol_destroy", ut_lvol_destroy) == NULL ||
+ CU_add_test(suite, "ut_lvs_rename", ut_lvs_rename) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}