summaryrefslogtreecommitdiffstats
path: root/src/spdk/test/unit/lib/nvme
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-07 18:45:59 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-07 18:45:59 +0000
commit19fcec84d8d7d21e796c7624e521b60d28ee21ed (patch)
tree42d26aa27d1e3f7c0b8bd3fd14e7d7082f5008dc /src/spdk/test/unit/lib/nvme
parentInitial commit. (diff)
downloadceph-upstream.tar.xz
ceph-upstream.zip
Adding upstream version 16.2.11+ds.upstream/16.2.11+dsupstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/spdk/test/unit/lib/nvme')
-rw-r--r--src/spdk/test/unit/lib/nvme/Makefile47
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme.c/nvme_ut.c1376
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/nvme_ctrlr_ut.c2150
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/nvme_ctrlr_cmd_ut.c751
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/nvme_ctrlr_ocssd_cmd_ut.c106
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ns.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ns.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ns.c/nvme_ns_ut.c153
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/nvme_ns_cmd_ut.c1739
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/nvme_ns_ocssd_cmd_ut.c650
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_pcie.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_pcie.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_pcie.c/nvme_pcie_ut.c498
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_poll_group.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_poll_group.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_poll_group.c/nvme_poll_group_ut.c484
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_qpair.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_qpair.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_qpair.c/nvme_qpair_ut.c625
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_quirks.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_quirks.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_quirks.c/nvme_quirks_ut.c92
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_rdma.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_rdma.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_rdma.c/nvme_rdma_ut.c406
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_tcp.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_tcp.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_tcp.c/nvme_tcp_ut.c459
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_uevent.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_uevent.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_uevent.c/nvme_uevent_ut.c165
43 files changed, 10247 insertions, 0 deletions
diff --git a/src/spdk/test/unit/lib/nvme/Makefile b/src/spdk/test/unit/lib/nvme/Makefile
new file mode 100644
index 000000000..5f74579d2
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/Makefile
@@ -0,0 +1,47 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = nvme.c nvme_ctrlr.c nvme_ctrlr_cmd.c nvme_ctrlr_ocssd_cmd.c nvme_ns.c nvme_ns_cmd.c nvme_ns_ocssd_cmd.c nvme_pcie.c nvme_poll_group.c nvme_qpair.c \
+ nvme_quirks.c nvme_tcp.c nvme_uevent.c \
+
+DIRS-$(CONFIG_RDMA) += nvme_rdma.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme.c/.gitignore
new file mode 100644
index 000000000..90c0c1678
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme.c/.gitignore
@@ -0,0 +1 @@
+nvme_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme.c/Makefile
new file mode 100644
index 000000000..4202cf54c
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme.c/nvme_ut.c b/src/spdk/test/unit/lib/nvme/nvme.c/nvme_ut.c
new file mode 100644
index 000000000..cf51a14bd
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme.c/nvme_ut.c
@@ -0,0 +1,1376 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation. All rights reserved.
+ * Copyright (c) 2020 Mellanox Technologies LTD. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "spdk/env.h"
+
+#include "nvme/nvme.c"
+
+#include "spdk_internal/mock.h"
+
+#include "common/lib/test_env.c"
+
+DEFINE_STUB_V(nvme_ctrlr_proc_get_ref, (struct spdk_nvme_ctrlr *ctrlr));
+DEFINE_STUB_V(nvme_ctrlr_proc_put_ref, (struct spdk_nvme_ctrlr *ctrlr));
+DEFINE_STUB_V(nvme_ctrlr_fail, (struct spdk_nvme_ctrlr *ctrlr, bool hotremove));
+DEFINE_STUB(spdk_nvme_transport_available_by_name, bool,
+ (const char *transport_name), true);
+/* return anything non-NULL, this won't be dereferenced anywhere in this test */
+DEFINE_STUB(nvme_ctrlr_get_current_process, struct spdk_nvme_ctrlr_process *,
+ (struct spdk_nvme_ctrlr *ctrlr), (struct spdk_nvme_ctrlr_process *)(uintptr_t)0x1);
+DEFINE_STUB(nvme_ctrlr_process_init, int,
+ (struct spdk_nvme_ctrlr *ctrlr), 0);
+DEFINE_STUB(nvme_ctrlr_get_ref_count, int,
+ (struct spdk_nvme_ctrlr *ctrlr), 0);
+DEFINE_STUB(dummy_probe_cb, bool,
+ (void *cb_ctx, const struct spdk_nvme_transport_id *trid,
+ struct spdk_nvme_ctrlr_opts *opts), false);
+DEFINE_STUB(nvme_transport_ctrlr_construct, struct spdk_nvme_ctrlr *,
+ (const struct spdk_nvme_transport_id *trid,
+ const struct spdk_nvme_ctrlr_opts *opts,
+ void *devhandle), NULL);
+DEFINE_STUB_V(nvme_io_msg_ctrlr_detach, (struct spdk_nvme_ctrlr *ctrlr));
+DEFINE_STUB(spdk_nvme_transport_available, bool,
+ (enum spdk_nvme_transport_type trtype), true);
+DEFINE_STUB(nvme_uevent_connect, int, (void), 1);
+
+
+static bool ut_destruct_called = false;
+void
+nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
+{
+ ut_destruct_called = true;
+}
+
+void
+spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
+{
+ memset(opts, 0, opts_size);
+ opts->opts_size = opts_size;
+}
+
+static void
+memset_trid(struct spdk_nvme_transport_id *trid1, struct spdk_nvme_transport_id *trid2)
+{
+ memset(trid1, 0, sizeof(struct spdk_nvme_transport_id));
+ memset(trid2, 0, sizeof(struct spdk_nvme_transport_id));
+}
+
+static bool ut_check_trtype = false;
+static bool ut_test_probe_internal = false;
+
+static int
+ut_nvme_pcie_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx,
+ bool direct_connect)
+{
+ struct spdk_nvme_ctrlr *ctrlr;
+ struct spdk_nvme_qpair qpair = {};
+ int rc;
+
+ if (probe_ctx->trid.trtype != SPDK_NVME_TRANSPORT_PCIE) {
+ return -1;
+ }
+
+ ctrlr = calloc(1, sizeof(*ctrlr));
+ CU_ASSERT(ctrlr != NULL);
+ ctrlr->adminq = &qpair;
+
+ /* happy path with first controller */
+ MOCK_SET(nvme_transport_ctrlr_construct, ctrlr);
+ rc = nvme_ctrlr_probe(&probe_ctx->trid, probe_ctx, NULL);
+ CU_ASSERT(rc == 0);
+
+ /* failed with the second controller */
+ MOCK_SET(nvme_transport_ctrlr_construct, NULL);
+ rc = nvme_ctrlr_probe(&probe_ctx->trid, probe_ctx, NULL);
+ CU_ASSERT(rc != 0);
+ MOCK_CLEAR_P(nvme_transport_ctrlr_construct);
+
+ return -1;
+}
+
+int
+nvme_transport_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
+{
+ free(ctrlr);
+ return 0;
+}
+
+int
+nvme_transport_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx,
+ bool direct_connect)
+{
+ struct spdk_nvme_ctrlr *ctrlr = NULL;
+
+ if (ut_check_trtype == true) {
+ CU_ASSERT(probe_ctx->trid.trtype == SPDK_NVME_TRANSPORT_PCIE);
+ }
+
+ if (ut_test_probe_internal) {
+ return ut_nvme_pcie_ctrlr_scan(probe_ctx, direct_connect);
+ }
+
+ if (direct_connect == true && probe_ctx->probe_cb) {
+ nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);
+ ctrlr = nvme_get_ctrlr_by_trid(&probe_ctx->trid);
+ nvme_robust_mutex_lock(&g_spdk_nvme_driver->lock);
+ probe_ctx->probe_cb(probe_ctx->cb_ctx, &probe_ctx->trid, &ctrlr->opts);
+ }
+ return 0;
+}
+
+static bool ut_attach_cb_called = false;
+static void
+dummy_attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
+ struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_ctrlr_opts *opts)
+{
+ ut_attach_cb_called = true;
+}
+
+static void
+test_spdk_nvme_probe(void)
+{
+ int rc = 0;
+ const struct spdk_nvme_transport_id *trid = NULL;
+ void *cb_ctx = NULL;
+ spdk_nvme_probe_cb probe_cb = NULL;
+ spdk_nvme_attach_cb attach_cb = dummy_attach_cb;
+ spdk_nvme_remove_cb remove_cb = NULL;
+ struct spdk_nvme_ctrlr ctrlr;
+ pthread_mutexattr_t attr;
+ struct nvme_driver dummy;
+ g_spdk_nvme_driver = &dummy;
+
+ /* driver init fails */
+ MOCK_SET(spdk_process_is_primary, false);
+ MOCK_SET(spdk_memzone_lookup, NULL);
+ rc = spdk_nvme_probe(trid, cb_ctx, probe_cb, attach_cb, remove_cb);
+ CU_ASSERT(rc == -1);
+
+ /*
+ * For secondary processes, the attach_cb should automatically get
+ * called for any controllers already initialized by the primary
+ * process.
+ */
+ MOCK_SET(spdk_nvme_transport_available_by_name, false);
+ MOCK_SET(spdk_process_is_primary, true);
+ dummy.initialized = true;
+ g_spdk_nvme_driver = &dummy;
+ rc = spdk_nvme_probe(trid, cb_ctx, probe_cb, attach_cb, remove_cb);
+ CU_ASSERT(rc == -1);
+
+ /* driver init passes, transport available, secondary call attach_cb */
+ MOCK_SET(spdk_nvme_transport_available_by_name, true);
+ MOCK_SET(spdk_process_is_primary, false);
+ MOCK_SET(spdk_memzone_lookup, g_spdk_nvme_driver);
+ dummy.initialized = true;
+ memset(&ctrlr, 0, sizeof(struct spdk_nvme_ctrlr));
+ CU_ASSERT(pthread_mutexattr_init(&attr) == 0);
+ CU_ASSERT(pthread_mutex_init(&dummy.lock, &attr) == 0);
+ TAILQ_INIT(&dummy.shared_attached_ctrlrs);
+ TAILQ_INSERT_TAIL(&dummy.shared_attached_ctrlrs, &ctrlr, tailq);
+ ut_attach_cb_called = false;
+	/* setup nvme_transport_ctrlr_scan() stub to also check the trtype */
+ ut_check_trtype = true;
+ rc = spdk_nvme_probe(trid, cb_ctx, probe_cb, attach_cb, remove_cb);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(ut_attach_cb_called == true);
+
+ /* driver init passes, transport available, we are primary */
+ MOCK_SET(spdk_process_is_primary, true);
+ rc = spdk_nvme_probe(trid, cb_ctx, probe_cb, attach_cb, remove_cb);
+ CU_ASSERT(rc == 0);
+
+ g_spdk_nvme_driver = NULL;
+ /* reset to pre-test values */
+ MOCK_CLEAR(spdk_memzone_lookup);
+ ut_check_trtype = false;
+
+ pthread_mutex_destroy(&dummy.lock);
+ pthread_mutexattr_destroy(&attr);
+}
+
+static void
+test_spdk_nvme_connect(void)
+{
+ struct spdk_nvme_ctrlr *ret_ctrlr = NULL;
+ struct spdk_nvme_transport_id trid = {};
+ struct spdk_nvme_ctrlr_opts opts = {};
+ struct spdk_nvme_ctrlr ctrlr;
+ pthread_mutexattr_t attr;
+ struct nvme_driver dummy;
+
+ /* initialize the variable to prepare the test */
+ dummy.initialized = true;
+ TAILQ_INIT(&dummy.shared_attached_ctrlrs);
+ g_spdk_nvme_driver = &dummy;
+ CU_ASSERT(pthread_mutexattr_init(&attr) == 0);
+ CU_ASSERT(pthread_mutex_init(&g_spdk_nvme_driver->lock, &attr) == 0);
+
+ /* set NULL trid pointer to test immediate return */
+ ret_ctrlr = spdk_nvme_connect(NULL, NULL, 0);
+ CU_ASSERT(ret_ctrlr == NULL);
+
+ /* driver init passes, transport available, secondary process connects ctrlr */
+ MOCK_SET(spdk_process_is_primary, false);
+ MOCK_SET(spdk_memzone_lookup, g_spdk_nvme_driver);
+ MOCK_SET(spdk_nvme_transport_available_by_name, true);
+ memset(&trid, 0, sizeof(trid));
+ trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
+ ret_ctrlr = spdk_nvme_connect(&trid, NULL, 0);
+ CU_ASSERT(ret_ctrlr == NULL);
+
+ /* driver init passes, setup one ctrlr on the attached_list */
+ memset(&ctrlr, 0, sizeof(struct spdk_nvme_ctrlr));
+ snprintf(ctrlr.trid.traddr, sizeof(ctrlr.trid.traddr), "0000:01:00.0");
+ ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_driver->shared_attached_ctrlrs, &ctrlr, tailq);
+ /* get the ctrlr from the attached list */
+ snprintf(trid.traddr, sizeof(trid.traddr), "0000:01:00.0");
+ ret_ctrlr = spdk_nvme_connect(&trid, NULL, 0);
+ CU_ASSERT(ret_ctrlr == &ctrlr);
+ /* get the ctrlr from the attached list with default ctrlr opts */
+ ctrlr.opts.num_io_queues = DEFAULT_MAX_IO_QUEUES;
+ ret_ctrlr = spdk_nvme_connect(&trid, NULL, 0);
+ CU_ASSERT(ret_ctrlr == &ctrlr);
+ CU_ASSERT_EQUAL(ret_ctrlr->opts.num_io_queues, DEFAULT_MAX_IO_QUEUES);
+ /* get the ctrlr from the attached list with default ctrlr opts and consistent opts_size */
+ opts.num_io_queues = 1;
+ ret_ctrlr = spdk_nvme_connect(&trid, &opts, sizeof(opts));
+ CU_ASSERT(ret_ctrlr == &ctrlr);
+ CU_ASSERT_EQUAL(ret_ctrlr->opts.num_io_queues, 1);
+ CU_ASSERT_EQUAL(ret_ctrlr->opts.opts_size, sizeof(opts));
+
+ /* opts_size is 0 */
+ ret_ctrlr = spdk_nvme_connect(&trid, &opts, 0);
+ CU_ASSERT(ret_ctrlr == &ctrlr);
+ CU_ASSERT_EQUAL(ret_ctrlr->opts.opts_size, 0);
+
+ /* opts_size is less than sizeof(*opts) if opts != NULL */
+ ret_ctrlr = spdk_nvme_connect(&trid, &opts, 4);
+ CU_ASSERT(ret_ctrlr == &ctrlr);
+ CU_ASSERT_EQUAL(ret_ctrlr->opts.num_io_queues, 1);
+ CU_ASSERT_EQUAL(ret_ctrlr->opts.opts_size, 4);
+ /* remove the attached ctrlr on the attached_list */
+ CU_ASSERT(spdk_nvme_detach(&ctrlr) == 0);
+ CU_ASSERT(TAILQ_EMPTY(&g_spdk_nvme_driver->shared_attached_ctrlrs));
+
+ /* driver init passes, transport available, primary process connects ctrlr */
+ MOCK_SET(spdk_process_is_primary, true);
+ /* setup one ctrlr on the attached_list */
+ memset(&ctrlr, 0, sizeof(struct spdk_nvme_ctrlr));
+ snprintf(ctrlr.trid.traddr, sizeof(ctrlr.trid.traddr), "0000:02:00.0");
+ ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_driver->shared_attached_ctrlrs, &ctrlr, tailq);
+ /* get the ctrlr from the attached list */
+ snprintf(trid.traddr, sizeof(trid.traddr), "0000:02:00.0");
+ ret_ctrlr = spdk_nvme_connect(&trid, NULL, 0);
+ CU_ASSERT(ret_ctrlr == &ctrlr);
+ /* get the ctrlr from the attached list with default ctrlr opts */
+ ctrlr.opts.num_io_queues = DEFAULT_MAX_IO_QUEUES;
+ ret_ctrlr = spdk_nvme_connect(&trid, NULL, 0);
+ CU_ASSERT(ret_ctrlr == &ctrlr);
+ CU_ASSERT_EQUAL(ret_ctrlr->opts.num_io_queues, DEFAULT_MAX_IO_QUEUES);
+ /* get the ctrlr from the attached list with default ctrlr opts and consistent opts_size */
+ opts.num_io_queues = 2;
+ ret_ctrlr = spdk_nvme_connect(&trid, &opts, sizeof(opts));
+ CU_ASSERT(ret_ctrlr == &ctrlr);
+ CU_ASSERT_EQUAL(ret_ctrlr->opts.num_io_queues, 2);
+ /* remove the attached ctrlr on the attached_list */
+ CU_ASSERT(spdk_nvme_detach(ret_ctrlr) == 0);
+ CU_ASSERT(TAILQ_EMPTY(&g_spdk_nvme_driver->shared_attached_ctrlrs));
+
+ /* test driver init failure return */
+ MOCK_SET(spdk_process_is_primary, false);
+ MOCK_SET(spdk_memzone_lookup, NULL);
+ ret_ctrlr = spdk_nvme_connect(&trid, NULL, 0);
+ CU_ASSERT(ret_ctrlr == NULL);
+}
+
+static struct spdk_nvme_probe_ctx *
+test_nvme_init_get_probe_ctx(void)
+{
+ struct spdk_nvme_probe_ctx *probe_ctx;
+
+ probe_ctx = calloc(1, sizeof(*probe_ctx));
+ SPDK_CU_ASSERT_FATAL(probe_ctx != NULL);
+ TAILQ_INIT(&probe_ctx->init_ctrlrs);
+
+ return probe_ctx;
+}
+
+static void
+test_nvme_init_controllers(void)
+{
+ int rc = 0;
+ struct nvme_driver test_driver;
+ void *cb_ctx = NULL;
+ spdk_nvme_attach_cb attach_cb = dummy_attach_cb;
+ struct spdk_nvme_probe_ctx *probe_ctx;
+ struct spdk_nvme_ctrlr *ctrlr;
+ pthread_mutexattr_t attr;
+
+ g_spdk_nvme_driver = &test_driver;
+ ctrlr = calloc(1, sizeof(*ctrlr));
+ SPDK_CU_ASSERT_FATAL(ctrlr != NULL);
+ ctrlr->trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
+ CU_ASSERT(pthread_mutexattr_init(&attr) == 0);
+ CU_ASSERT(pthread_mutex_init(&test_driver.lock, &attr) == 0);
+ TAILQ_INIT(&test_driver.shared_attached_ctrlrs);
+
+ /*
+ * Try to initialize, but nvme_ctrlr_process_init will fail.
+ * Verify correct behavior when it does.
+ */
+ MOCK_SET(nvme_ctrlr_process_init, 1);
+ MOCK_SET(spdk_process_is_primary, 1);
+ g_spdk_nvme_driver->initialized = false;
+ ut_destruct_called = false;
+ probe_ctx = test_nvme_init_get_probe_ctx();
+ TAILQ_INSERT_TAIL(&probe_ctx->init_ctrlrs, ctrlr, tailq);
+ probe_ctx->cb_ctx = cb_ctx;
+ probe_ctx->attach_cb = attach_cb;
+ probe_ctx->trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
+ rc = nvme_init_controllers(probe_ctx);
+ CU_ASSERT(rc != 0);
+ CU_ASSERT(g_spdk_nvme_driver->initialized == true);
+ CU_ASSERT(ut_destruct_called == true);
+
+ /*
+ * Controller init OK, need to move the controller state machine
+ * forward by setting the ctrl state so that it can be moved
+	 * to the shared_attached_ctrlrs list.
+ */
+ probe_ctx = test_nvme_init_get_probe_ctx();
+ TAILQ_INSERT_TAIL(&probe_ctx->init_ctrlrs, ctrlr, tailq);
+ ctrlr->state = NVME_CTRLR_STATE_READY;
+ MOCK_SET(nvme_ctrlr_process_init, 0);
+ rc = nvme_init_controllers(probe_ctx);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(ut_attach_cb_called == true);
+ CU_ASSERT(TAILQ_EMPTY(&g_nvme_attached_ctrlrs));
+ CU_ASSERT(TAILQ_FIRST(&g_spdk_nvme_driver->shared_attached_ctrlrs) == ctrlr);
+ TAILQ_REMOVE(&g_spdk_nvme_driver->shared_attached_ctrlrs, ctrlr, tailq);
+
+ /*
+ * Non-PCIe controllers should be added to the per-process list, not the shared list.
+ */
+ memset(ctrlr, 0, sizeof(struct spdk_nvme_ctrlr));
+ ctrlr->trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
+ probe_ctx = test_nvme_init_get_probe_ctx();
+ TAILQ_INSERT_TAIL(&probe_ctx->init_ctrlrs, ctrlr, tailq);
+ ctrlr->state = NVME_CTRLR_STATE_READY;
+ MOCK_SET(nvme_ctrlr_process_init, 0);
+ rc = nvme_init_controllers(probe_ctx);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(ut_attach_cb_called == true);
+ CU_ASSERT(TAILQ_EMPTY(&g_spdk_nvme_driver->shared_attached_ctrlrs));
+ CU_ASSERT(TAILQ_FIRST(&g_nvme_attached_ctrlrs) == ctrlr);
+ TAILQ_REMOVE(&g_nvme_attached_ctrlrs, ctrlr, tailq);
+ free(ctrlr);
+ CU_ASSERT(TAILQ_EMPTY(&g_nvme_attached_ctrlrs));
+
+ g_spdk_nvme_driver = NULL;
+ pthread_mutexattr_destroy(&attr);
+ pthread_mutex_destroy(&test_driver.lock);
+}
+
+static void
+test_nvme_driver_init(void)
+{
+ int rc;
+ struct nvme_driver dummy;
+ g_spdk_nvme_driver = &dummy;
+
+ /* adjust this so testing doesn't take so long */
+ g_nvme_driver_timeout_ms = 100;
+
+ /* process is primary and mem already reserved */
+ MOCK_SET(spdk_process_is_primary, true);
+ dummy.initialized = true;
+ rc = nvme_driver_init();
+ CU_ASSERT(rc == 0);
+
+ /*
+ * Process is primary and mem not yet reserved but the call
+ * to spdk_memzone_reserve() returns NULL.
+ */
+ g_spdk_nvme_driver = NULL;
+ MOCK_SET(spdk_process_is_primary, true);
+ MOCK_SET(spdk_memzone_reserve, NULL);
+ rc = nvme_driver_init();
+ CU_ASSERT(rc == -1);
+
+ /* process is not primary, no mem already reserved */
+ MOCK_SET(spdk_process_is_primary, false);
+ MOCK_SET(spdk_memzone_lookup, NULL);
+ g_spdk_nvme_driver = NULL;
+ rc = nvme_driver_init();
+ CU_ASSERT(rc == -1);
+
+ /* process is not primary, mem is already reserved & init'd */
+ MOCK_SET(spdk_process_is_primary, false);
+ MOCK_SET(spdk_memzone_lookup, (void *)&dummy);
+ dummy.initialized = true;
+ rc = nvme_driver_init();
+ CU_ASSERT(rc == 0);
+
+ /* process is not primary, mem is reserved but not initialized */
+ /* and times out */
+ MOCK_SET(spdk_process_is_primary, false);
+ MOCK_SET(spdk_memzone_reserve, (void *)&dummy);
+ dummy.initialized = false;
+ rc = nvme_driver_init();
+ CU_ASSERT(rc == -1);
+
+ /* process is primary, got mem but mutex won't init */
+ MOCK_SET(spdk_process_is_primary, true);
+ MOCK_SET(spdk_memzone_reserve, (void *)&dummy);
+ MOCK_SET(pthread_mutexattr_init, -1);
+ g_spdk_nvme_driver = NULL;
+ dummy.initialized = true;
+ rc = nvme_driver_init();
+	/* for FreeBSD we can't effectively mock this path */
+#ifndef __FreeBSD__
+ CU_ASSERT(rc != 0);
+#else
+ CU_ASSERT(rc == 0);
+#endif
+
+ /* process is primary, got mem, mutex OK */
+ MOCK_SET(spdk_process_is_primary, true);
+ MOCK_CLEAR(pthread_mutexattr_init);
+ g_spdk_nvme_driver = NULL;
+ rc = nvme_driver_init();
+ CU_ASSERT(g_spdk_nvme_driver->initialized == false);
+ CU_ASSERT(TAILQ_EMPTY(&g_spdk_nvme_driver->shared_attached_ctrlrs));
+ CU_ASSERT(rc == 0);
+
+ g_spdk_nvme_driver = NULL;
+ MOCK_CLEAR(spdk_memzone_reserve);
+ MOCK_CLEAR(spdk_memzone_lookup);
+}
+
+static void
+test_spdk_nvme_detach(void)
+{
+ int rc = 1;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_ctrlr *ret_ctrlr;
+ struct nvme_driver test_driver;
+
+ memset(&ctrlr, 0, sizeof(ctrlr));
+ ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
+
+ g_spdk_nvme_driver = &test_driver;
+ TAILQ_INIT(&test_driver.shared_attached_ctrlrs);
+ TAILQ_INSERT_TAIL(&test_driver.shared_attached_ctrlrs, &ctrlr, tailq);
+ CU_ASSERT(pthread_mutex_init(&test_driver.lock, NULL) == 0);
+
+ /*
+ * Controllers are ref counted so mock the function that returns
+ * the ref count so that detach will actually call the destruct
+ * function which we've mocked simply to verify that it gets
+ * called (we aren't testing what the real destruct function does
+ * here.)
+ */
+ MOCK_SET(nvme_ctrlr_get_ref_count, 0);
+ rc = spdk_nvme_detach(&ctrlr);
+ ret_ctrlr = TAILQ_FIRST(&test_driver.shared_attached_ctrlrs);
+ CU_ASSERT(ret_ctrlr == NULL);
+ CU_ASSERT(ut_destruct_called == true);
+ CU_ASSERT(rc == 0);
+
+ /*
+ * Mock the ref count to 1 so we confirm that the destruct
+ * function is not called and that attached ctrl list is
+ * not empty.
+ */
+ MOCK_SET(nvme_ctrlr_get_ref_count, 1);
+ TAILQ_INSERT_TAIL(&test_driver.shared_attached_ctrlrs, &ctrlr, tailq);
+ ut_destruct_called = false;
+ rc = spdk_nvme_detach(&ctrlr);
+ ret_ctrlr = TAILQ_FIRST(&test_driver.shared_attached_ctrlrs);
+ CU_ASSERT(ret_ctrlr != NULL);
+ CU_ASSERT(ut_destruct_called == false);
+ CU_ASSERT(rc == 0);
+
+ /*
+ * Non-PCIe controllers should be on the per-process attached_ctrlrs list, not the
+ * shared_attached_ctrlrs list. Test an RDMA controller and ensure it is removed
+ * from the correct list.
+ */
+ memset(&ctrlr, 0, sizeof(ctrlr));
+ ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
+ TAILQ_INIT(&g_nvme_attached_ctrlrs);
+ TAILQ_INSERT_TAIL(&g_nvme_attached_ctrlrs, &ctrlr, tailq);
+ MOCK_SET(nvme_ctrlr_get_ref_count, 0);
+ rc = spdk_nvme_detach(&ctrlr);
+ CU_ASSERT(TAILQ_EMPTY(&g_nvme_attached_ctrlrs));
+ CU_ASSERT(ut_destruct_called == true);
+ CU_ASSERT(rc == 0);
+
+ g_spdk_nvme_driver = NULL;
+ pthread_mutex_destroy(&test_driver.lock);
+}
+
+static void
+test_nvme_completion_poll_cb(void)
+{
+ struct nvme_completion_poll_status *status;
+ struct spdk_nvme_cpl cpl;
+
+ status = calloc(1, sizeof(*status));
+ SPDK_CU_ASSERT_FATAL(status != NULL);
+
+ memset(&cpl, 0xff, sizeof(cpl));
+
+ nvme_completion_poll_cb(status, &cpl);
+ CU_ASSERT(status->done == true);
+ CU_ASSERT(memcmp(&cpl, &status->cpl,
+ sizeof(struct spdk_nvme_cpl)) == 0);
+
+ free(status);
+}
+
+/* stub callback used by test_nvme_user_copy_cmd_complete() */
+static struct spdk_nvme_cpl ut_spdk_nvme_cpl = {0};
+static void
+dummy_cb(void *user_cb_arg, struct spdk_nvme_cpl *cpl)
+{
+ ut_spdk_nvme_cpl = *cpl;
+}
+
+static void
+test_nvme_user_copy_cmd_complete(void)
+{
+ struct nvme_request req;
+ int test_data = 0xdeadbeef;
+ int buff_size = sizeof(int);
+ void *buff;
+ static struct spdk_nvme_cpl cpl;
+
+ memset(&req, 0, sizeof(req));
+ memset(&cpl, 0x5a, sizeof(cpl));
+
+ /* test without a user buffer provided */
+ req.user_cb_fn = (void *)dummy_cb;
+ nvme_user_copy_cmd_complete(&req, &cpl);
+ CU_ASSERT(memcmp(&ut_spdk_nvme_cpl, &cpl, sizeof(cpl)) == 0);
+
+ /* test with a user buffer provided */
+ req.user_buffer = malloc(buff_size);
+ SPDK_CU_ASSERT_FATAL(req.user_buffer != NULL);
+ memset(req.user_buffer, 0, buff_size);
+ req.payload_size = buff_size;
+ buff = spdk_zmalloc(buff_size, 0x100, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
+ SPDK_CU_ASSERT_FATAL(buff != NULL);
+ req.payload = NVME_PAYLOAD_CONTIG(buff, NULL);
+ memcpy(buff, &test_data, buff_size);
+ req.cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
+ req.pid = getpid();
+
+ /* zero out the test value set in the callback */
+ memset(&ut_spdk_nvme_cpl, 0, sizeof(ut_spdk_nvme_cpl));
+
+ nvme_user_copy_cmd_complete(&req, &cpl);
+ CU_ASSERT(memcmp(req.user_buffer, &test_data, buff_size) == 0);
+ CU_ASSERT(memcmp(&ut_spdk_nvme_cpl, &cpl, sizeof(cpl)) == 0);
+
+ /*
+ * Now test the same path as above but this time choose an opc
+ * that results in a different data transfer type.
+ */
+ memset(&ut_spdk_nvme_cpl, 0, sizeof(ut_spdk_nvme_cpl));
+ memset(req.user_buffer, 0, buff_size);
+ buff = spdk_zmalloc(buff_size, 0x100, NULL, SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
+ SPDK_CU_ASSERT_FATAL(buff != NULL);
+ req.payload = NVME_PAYLOAD_CONTIG(buff, NULL);
+ memcpy(buff, &test_data, buff_size);
+ req.cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
+ nvme_user_copy_cmd_complete(&req, &cpl);
+ CU_ASSERT(memcmp(req.user_buffer, &test_data, buff_size) != 0);
+ CU_ASSERT(memcmp(&ut_spdk_nvme_cpl, &cpl, sizeof(cpl)) == 0);
+
+ /* clean up */
+ free(req.user_buffer);
+}
+
+static void
+test_nvme_allocate_request_null(void)
+{
+ struct spdk_nvme_qpair qpair;
+ spdk_nvme_cmd_cb cb_fn = (spdk_nvme_cmd_cb)0x1234;
+ void *cb_arg = (void *)0x5678;
+ struct nvme_request *req = NULL;
+ struct nvme_request dummy_req;
+
+ STAILQ_INIT(&qpair.free_req);
+ STAILQ_INIT(&qpair.queued_req);
+
+ /*
+ * Put a dummy on the queue so we can make a request
+ * and confirm that what comes back is what we expect.
+ */
+ STAILQ_INSERT_HEAD(&qpair.free_req, &dummy_req, stailq);
+
+ req = nvme_allocate_request_null(&qpair, cb_fn, cb_arg);
+
+ /*
+	 * Compare the req with the parameters that we passed in
+ * as well as what the function is supposed to update.
+ */
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+ CU_ASSERT(req->cb_fn == cb_fn);
+ CU_ASSERT(req->cb_arg == cb_arg);
+ CU_ASSERT(req->pid == getpid());
+ CU_ASSERT(nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_CONTIG);
+ CU_ASSERT(req->payload.md == NULL);
+ CU_ASSERT(req->payload.contig_or_cb_arg == NULL);
+}
+
+static void
+test_nvme_allocate_request(void)
+{
+ struct spdk_nvme_qpair qpair;
+ struct nvme_payload payload;
+ uint32_t payload_struct_size = sizeof(payload);
+ spdk_nvme_cmd_cb cb_fn = (spdk_nvme_cmd_cb)0x1234;
+ void *cb_arg = (void *)0x6789;
+ struct nvme_request *req = NULL;
+ struct nvme_request dummy_req;
+
+ /* Fill the whole payload struct with a known pattern */
+ memset(&payload, 0x5a, payload_struct_size);
+ STAILQ_INIT(&qpair.free_req);
+ STAILQ_INIT(&qpair.queued_req);
+
+ /* Test trying to allocate a request when no requests are available */
+ req = nvme_allocate_request(&qpair, &payload, payload_struct_size, 0,
+ cb_fn, cb_arg);
+ CU_ASSERT(req == NULL);
+
+ /* put a dummy on the queue, and then allocate one */
+ STAILQ_INSERT_HEAD(&qpair.free_req, &dummy_req, stailq);
+ req = nvme_allocate_request(&qpair, &payload, payload_struct_size, 0,
+ cb_fn, cb_arg);
+
+ /* all the req elements should now match the passed in parameters */
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+ CU_ASSERT(req->cb_fn == cb_fn);
+ CU_ASSERT(req->cb_arg == cb_arg);
+ CU_ASSERT(memcmp(&req->payload, &payload, payload_struct_size) == 0);
+ CU_ASSERT(req->payload_size == payload_struct_size);
+ CU_ASSERT(req->pid == getpid());
+}
+
+static void
+test_nvme_free_request(void)
+{
+ struct nvme_request match_req;
+ struct spdk_nvme_qpair qpair;
+ struct nvme_request *req;
+
+ /* put a req on the Q, take it off and compare */
+ memset(&match_req.cmd, 0x5a, sizeof(struct spdk_nvme_cmd));
+ match_req.qpair = &qpair;
+	/* the code under test asserts this condition */
+ match_req.num_children = 0;
+ STAILQ_INIT(&qpair.free_req);
+
+ nvme_free_request(&match_req);
+ req = STAILQ_FIRST(&match_req.qpair->free_req);
+ CU_ASSERT(req == &match_req);
+}
+
+static void
+test_nvme_allocate_request_user_copy(void)
+{
+ struct spdk_nvme_qpair qpair;
+ spdk_nvme_cmd_cb cb_fn = (spdk_nvme_cmd_cb)0x12345;
+ void *cb_arg = (void *)0x12345;
+ bool host_to_controller = true;
+ struct nvme_request *req;
+ struct nvme_request dummy_req;
+ int test_data = 0xdeadbeef;
+ void *buffer = NULL;
+ uint32_t payload_size = sizeof(int);
+
+ STAILQ_INIT(&qpair.free_req);
+ STAILQ_INIT(&qpair.queued_req);
+
+ /* no buffer or valid payload size, early NULL return */
+ req = nvme_allocate_request_user_copy(&qpair, buffer, payload_size, cb_fn,
+ cb_arg, host_to_controller);
+ CU_ASSERT(req == NULL);
+
+ /* good buffer and valid payload size */
+ buffer = malloc(payload_size);
+ SPDK_CU_ASSERT_FATAL(buffer != NULL);
+ memcpy(buffer, &test_data, payload_size);
+
+ /* put a dummy on the queue */
+ STAILQ_INSERT_HEAD(&qpair.free_req, &dummy_req, stailq);
+
+ MOCK_CLEAR(spdk_malloc);
+ MOCK_CLEAR(spdk_zmalloc);
+ req = nvme_allocate_request_user_copy(&qpair, buffer, payload_size, cb_fn,
+ cb_arg, host_to_controller);
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+ CU_ASSERT(req->user_cb_fn == cb_fn);
+ CU_ASSERT(req->user_cb_arg == cb_arg);
+ CU_ASSERT(req->user_buffer == buffer);
+ CU_ASSERT(req->cb_arg == req);
+ CU_ASSERT(memcmp(req->payload.contig_or_cb_arg, buffer, payload_size) == 0);
+ spdk_free(req->payload.contig_or_cb_arg);
+
+ /* same thing but additional path coverage, no copy */
+ host_to_controller = false;
+ STAILQ_INSERT_HEAD(&qpair.free_req, &dummy_req, stailq);
+
+ req = nvme_allocate_request_user_copy(&qpair, buffer, payload_size, cb_fn,
+ cb_arg, host_to_controller);
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+ CU_ASSERT(req->user_cb_fn == cb_fn);
+ CU_ASSERT(req->user_cb_arg == cb_arg);
+ CU_ASSERT(req->user_buffer == buffer);
+ CU_ASSERT(req->cb_arg == req);
+ CU_ASSERT(memcmp(req->payload.contig_or_cb_arg, buffer, payload_size) != 0);
+ spdk_free(req->payload.contig_or_cb_arg);
+
+ /* good buffer and valid payload size but make spdk_zmalloc fail */
+ /* set the mock pointer to NULL for spdk_zmalloc */
+ MOCK_SET(spdk_zmalloc, NULL);
+ req = nvme_allocate_request_user_copy(&qpair, buffer, payload_size, cb_fn,
+ cb_arg, host_to_controller);
+ CU_ASSERT(req == NULL);
+ free(buffer);
+ MOCK_CLEAR(spdk_zmalloc);
+}
+
+static void
+test_nvme_ctrlr_probe(void)
+{
+ int rc = 0;
+ struct spdk_nvme_ctrlr ctrlr = {};
+ struct spdk_nvme_qpair qpair = {};
+ const struct spdk_nvme_transport_id trid = {};
+ struct spdk_nvme_probe_ctx probe_ctx = {};
+ void *devhandle = NULL;
+ void *cb_ctx = NULL;
+ struct spdk_nvme_ctrlr *dummy = NULL;
+
+ ctrlr.adminq = &qpair;
+
+ TAILQ_INIT(&probe_ctx.init_ctrlrs);
+ nvme_driver_init();
+
+ /* test when probe_cb returns false */
+
+ MOCK_SET(dummy_probe_cb, false);
+ nvme_probe_ctx_init(&probe_ctx, &trid, cb_ctx, dummy_probe_cb, NULL, NULL);
+ rc = nvme_ctrlr_probe(&trid, &probe_ctx, devhandle);
+ CU_ASSERT(rc == 1);
+
+ /* probe_cb returns true but we can't construct a ctrl */
+ MOCK_SET(dummy_probe_cb, true);
+ MOCK_SET(nvme_transport_ctrlr_construct, NULL);
+ nvme_probe_ctx_init(&probe_ctx, &trid, cb_ctx, dummy_probe_cb, NULL, NULL);
+ rc = nvme_ctrlr_probe(&trid, &probe_ctx, devhandle);
+ CU_ASSERT(rc == -1);
+
+ /* happy path */
+ MOCK_SET(dummy_probe_cb, true);
+ MOCK_SET(nvme_transport_ctrlr_construct, &ctrlr);
+ nvme_probe_ctx_init(&probe_ctx, &trid, cb_ctx, dummy_probe_cb, NULL, NULL);
+ rc = nvme_ctrlr_probe(&trid, &probe_ctx, devhandle);
+ CU_ASSERT(rc == 0);
+ dummy = TAILQ_FIRST(&probe_ctx.init_ctrlrs);
+ SPDK_CU_ASSERT_FATAL(dummy != NULL);
+ CU_ASSERT(dummy == ut_nvme_transport_ctrlr_construct);
+ TAILQ_REMOVE(&probe_ctx.init_ctrlrs, dummy, tailq);
+ MOCK_CLEAR_P(nvme_transport_ctrlr_construct);
+
+ free(g_spdk_nvme_driver);
+}
+
+static void
+test_nvme_robust_mutex_init_shared(void)
+{
+ pthread_mutex_t mtx;
+ int rc = 0;
+
+ /* test where both pthread calls succeed */
+ MOCK_SET(pthread_mutexattr_init, 0);
+ MOCK_SET(pthread_mutex_init, 0);
+ rc = nvme_robust_mutex_init_shared(&mtx);
+ CU_ASSERT(rc == 0);
+
+ /* test where we can't init attr's but init mutex works */
+ MOCK_SET(pthread_mutexattr_init, -1);
+ MOCK_SET(pthread_mutex_init, 0);
+ rc = nvme_robust_mutex_init_shared(&mtx);
+ /* for FreeBSD the only possible return value is 0 */
+#ifndef __FreeBSD__
+ CU_ASSERT(rc != 0);
+#else
+ CU_ASSERT(rc == 0);
+#endif
+
+ /* test where we can init attr's but the mutex init fails */
+ MOCK_SET(pthread_mutexattr_init, 0);
+ MOCK_SET(pthread_mutex_init, -1);
+ rc = nvme_robust_mutex_init_shared(&mtx);
+ /* for FreeBSD the only possible return value is 0 */
+#ifndef __FreeBSD__
+ CU_ASSERT(rc != 0);
+#else
+ CU_ASSERT(rc == 0);
+#endif
+}
+
+static void
+test_opc_data_transfer(void)
+{
+ enum spdk_nvme_data_transfer xfer;
+
+ xfer = spdk_nvme_opc_get_data_transfer(SPDK_NVME_OPC_FLUSH);
+ CU_ASSERT(xfer == SPDK_NVME_DATA_NONE);
+
+ xfer = spdk_nvme_opc_get_data_transfer(SPDK_NVME_OPC_WRITE);
+ CU_ASSERT(xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER);
+
+ xfer = spdk_nvme_opc_get_data_transfer(SPDK_NVME_OPC_READ);
+ CU_ASSERT(xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST);
+
+ xfer = spdk_nvme_opc_get_data_transfer(SPDK_NVME_OPC_GET_LOG_PAGE);
+ CU_ASSERT(xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST);
+}
+
+static void
+test_trid_parse_and_compare(void)
+{
+ struct spdk_nvme_transport_id trid1, trid2;
+ int ret;
+
+	/* exercise spdk_nvme_transport_id_parse with invalid arguments first */
+ ret = spdk_nvme_transport_id_parse(NULL, "trtype:PCIe traddr:0000:04:00.0");
+ CU_ASSERT(ret == -EINVAL);
+ memset(&trid1, 0, sizeof(trid1));
+ ret = spdk_nvme_transport_id_parse(&trid1, NULL);
+ CU_ASSERT(ret == -EINVAL);
+ ret = spdk_nvme_transport_id_parse(NULL, NULL);
+ CU_ASSERT(ret == -EINVAL);
+ memset(&trid1, 0, sizeof(trid1));
+ ret = spdk_nvme_transport_id_parse(&trid1, "trtype-PCIe traddr-0000-04-00.0");
+ CU_ASSERT(ret == -EINVAL);
+ memset(&trid1, 0, sizeof(trid1));
+ ret = spdk_nvme_transport_id_parse(&trid1, "trtype-PCIe traddr-0000-04-00.0-:");
+ CU_ASSERT(ret == -EINVAL);
+ memset(&trid1, 0, sizeof(trid1));
+ ret = spdk_nvme_transport_id_parse(&trid1, " \t\n:");
+ CU_ASSERT(ret == -EINVAL);
+ memset(&trid1, 0, sizeof(trid1));
+ CU_ASSERT(spdk_nvme_transport_id_parse(&trid1,
+ "trtype:rdma\n"
+ "adrfam:ipv4\n"
+ "traddr:192.168.100.8\n"
+ "trsvcid:4420\n"
+ "subnqn:nqn.2014-08.org.nvmexpress.discovery") == 0);
+ CU_ASSERT(trid1.trtype == SPDK_NVME_TRANSPORT_RDMA);
+ CU_ASSERT(trid1.adrfam == SPDK_NVMF_ADRFAM_IPV4);
+ CU_ASSERT(strcmp(trid1.traddr, "192.168.100.8") == 0);
+ CU_ASSERT(strcmp(trid1.trsvcid, "4420") == 0);
+ CU_ASSERT(strcmp(trid1.subnqn, "nqn.2014-08.org.nvmexpress.discovery") == 0);
+
+ memset(&trid2, 0, sizeof(trid2));
+ CU_ASSERT(spdk_nvme_transport_id_parse(&trid2, "trtype:PCIe traddr:0000:04:00.0") == 0);
+ CU_ASSERT(trid2.trtype == SPDK_NVME_TRANSPORT_PCIE);
+ CU_ASSERT(strcmp(trid2.traddr, "0000:04:00.0") == 0);
+
+ CU_ASSERT(spdk_nvme_transport_id_compare(&trid1, &trid2) != 0);
+
+ /* set trid1 trid2 and test id_compare */
+ memset_trid(&trid1, &trid2);
+ trid1.adrfam = SPDK_NVMF_ADRFAM_IPV6;
+ trid2.adrfam = SPDK_NVMF_ADRFAM_IPV4;
+ ret = spdk_nvme_transport_id_compare(&trid1, &trid2);
+ CU_ASSERT(ret > 0);
+
+ memset_trid(&trid1, &trid2);
+ snprintf(trid1.traddr, sizeof(trid1.traddr), "192.168.100.8");
+ snprintf(trid2.traddr, sizeof(trid2.traddr), "192.168.100.9");
+ ret = spdk_nvme_transport_id_compare(&trid1, &trid2);
+ CU_ASSERT(ret < 0);
+
+ memset_trid(&trid1, &trid2);
+ snprintf(trid1.trsvcid, sizeof(trid1.trsvcid), "4420");
+ snprintf(trid2.trsvcid, sizeof(trid2.trsvcid), "4421");
+ ret = spdk_nvme_transport_id_compare(&trid1, &trid2);
+ CU_ASSERT(ret < 0);
+
+ memset_trid(&trid1, &trid2);
+ snprintf(trid1.subnqn, sizeof(trid1.subnqn), "subnqn:nqn.2016-08.org.nvmexpress.discovery");
+ snprintf(trid2.subnqn, sizeof(trid2.subnqn), "subnqn:nqn.2017-08.org.nvmexpress.discovery");
+ ret = spdk_nvme_transport_id_compare(&trid1, &trid2);
+ CU_ASSERT(ret < 0);
+
+ memset_trid(&trid1, &trid2);
+ snprintf(trid1.subnqn, sizeof(trid1.subnqn), "subnqn:nqn.2016-08.org.nvmexpress.discovery");
+ snprintf(trid2.subnqn, sizeof(trid2.subnqn), "subnqn:nqn.2016-08.org.nvmexpress.discovery");
+ ret = spdk_nvme_transport_id_compare(&trid1, &trid2);
+ CU_ASSERT(ret == 0);
+
+ memset_trid(&trid1, &trid2);
+ snprintf(trid1.subnqn, sizeof(trid1.subnqn), "subnqn:nqn.2016-08.org.nvmexpress.discovery");
+ snprintf(trid2.subnqn, sizeof(trid2.subnqn), "subnqn:nqn.2016-08.org.Nvmexpress.discovery");
+ ret = spdk_nvme_transport_id_compare(&trid1, &trid2);
+ CU_ASSERT(ret > 0);
+
+ memset_trid(&trid1, &trid2);
+ ret = spdk_nvme_transport_id_compare(&trid1, &trid2);
+ CU_ASSERT(ret == 0);
+
+ /* Compare PCI addresses via spdk_pci_addr_compare (rather than as strings) */
+ memset_trid(&trid1, &trid2);
+ CU_ASSERT(spdk_nvme_transport_id_parse(&trid1, "trtype:PCIe traddr:0000:04:00.0") == 0);
+ CU_ASSERT(spdk_nvme_transport_id_parse(&trid2, "trtype:PCIe traddr:04:00.0") == 0);
+ CU_ASSERT(spdk_nvme_transport_id_compare(&trid1, &trid2) == 0);
+
+ memset_trid(&trid1, &trid2);
+ CU_ASSERT(spdk_nvme_transport_id_parse(&trid1, "trtype:PCIe traddr:0000:05:00.0") == 0);
+ CU_ASSERT(spdk_nvme_transport_id_parse(&trid2, "trtype:PCIe traddr:04:00.0") == 0);
+ CU_ASSERT(spdk_nvme_transport_id_compare(&trid1, &trid2) > 0);
+
+ memset_trid(&trid1, &trid2);
+ CU_ASSERT(spdk_nvme_transport_id_parse(&trid1, "trtype:PCIe traddr:0000:04:00.0") == 0);
+ CU_ASSERT(spdk_nvme_transport_id_parse(&trid2, "trtype:PCIe traddr:05:00.0") == 0);
+ CU_ASSERT(spdk_nvme_transport_id_compare(&trid1, &trid2) < 0);
+
+ memset_trid(&trid1, &trid2);
+ CU_ASSERT(spdk_nvme_transport_id_parse(&trid1, "trtype=PCIe traddr=0000:04:00.0") == 0);
+ CU_ASSERT(spdk_nvme_transport_id_parse(&trid2, "trtype=PCIe traddr=05:00.0") == 0);
+ CU_ASSERT(spdk_nvme_transport_id_compare(&trid1, &trid2) < 0);
+
+ CU_ASSERT(spdk_nvme_transport_id_parse(&trid1,
+ "trtype:tcp\n"
+ "adrfam:ipv4\n"
+ "traddr:192.168.100.8\n"
+ "trsvcid:4420\n"
+ "priority:2\n"
+ "subnqn:nqn.2014-08.org.nvmexpress.discovery") == 0);
+ CU_ASSERT(trid1.priority == 2);
+}
+
+static void
+test_spdk_nvme_transport_id_parse_trtype(void)
+{
+
+ enum spdk_nvme_transport_type *trtype;
+ enum spdk_nvme_transport_type sct;
+ char *str;
+
+ trtype = NULL;
+ str = "unit_test";
+
+ /* test function returned value when trtype is NULL but str not NULL */
+ CU_ASSERT(spdk_nvme_transport_id_parse_trtype(trtype, str) == (-EINVAL));
+
+ /* test function returned value when str is NULL but trtype not NULL */
+ trtype = &sct;
+ str = NULL;
+ CU_ASSERT(spdk_nvme_transport_id_parse_trtype(trtype, str) == (-EINVAL));
+
+	/* test function returned value when str and trtype not NULL, but str value
+	 * not "PCIe" or "RDMA" */
+ str = "unit_test";
+ CU_ASSERT(spdk_nvme_transport_id_parse_trtype(trtype, str) == 0);
+ CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_CUSTOM);
+
+ /* test trtype value when use function "strcasecmp" to compare str and "PCIe",not case-sensitive */
+ str = "PCIe";
+ spdk_nvme_transport_id_parse_trtype(trtype, str);
+ CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_PCIE);
+
+ str = "pciE";
+ spdk_nvme_transport_id_parse_trtype(trtype, str);
+ CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_PCIE);
+
+ /* test trtype value when use function "strcasecmp" to compare str and "RDMA",not case-sensitive */
+ str = "RDMA";
+ spdk_nvme_transport_id_parse_trtype(trtype, str);
+ CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_RDMA);
+
+ str = "rdma";
+ spdk_nvme_transport_id_parse_trtype(trtype, str);
+ CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_RDMA);
+
+ /* test trtype value when use function "strcasecmp" to compare str and "FC",not case-sensitive */
+ str = "FC";
+ spdk_nvme_transport_id_parse_trtype(trtype, str);
+ CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_FC);
+
+ str = "fc";
+ spdk_nvme_transport_id_parse_trtype(trtype, str);
+ CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_FC);
+
+ /* test trtype value when use function "strcasecmp" to compare str and "TCP",not case-sensitive */
+ str = "TCP";
+ spdk_nvme_transport_id_parse_trtype(trtype, str);
+ CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_TCP);
+
+ str = "tcp";
+ spdk_nvme_transport_id_parse_trtype(trtype, str);
+ CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_TCP);
+}
+
+static void
+test_spdk_nvme_transport_id_parse_adrfam(void)
+{
+
+ enum spdk_nvmf_adrfam *adrfam;
+ enum spdk_nvmf_adrfam sct;
+ char *str;
+
+ adrfam = NULL;
+ str = "unit_test";
+
+ /* test function returned value when adrfam is NULL but str not NULL */
+ CU_ASSERT(spdk_nvme_transport_id_parse_adrfam(adrfam, str) == (-EINVAL));
+
+ /* test function returned value when str is NULL but adrfam not NULL */
+ adrfam = &sct;
+ str = NULL;
+ CU_ASSERT(spdk_nvme_transport_id_parse_adrfam(adrfam, str) == (-EINVAL));
+
+ /* test function returned value when str and adrfam not NULL, but str value
+ * not "IPv4" or "IPv6" or "IB" or "FC" */
+ str = "unit_test";
+ CU_ASSERT(spdk_nvme_transport_id_parse_adrfam(adrfam, str) == (-ENOENT));
+
+ /* test adrfam value when use function "strcasecmp" to compare str and "IPv4",not case-sensitive */
+ str = "IPv4";
+ spdk_nvme_transport_id_parse_adrfam(adrfam, str);
+ CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_IPV4);
+
+ str = "ipV4";
+ spdk_nvme_transport_id_parse_adrfam(adrfam, str);
+ CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_IPV4);
+
+ /* test adrfam value when use function "strcasecmp" to compare str and "IPv6",not case-sensitive */
+ str = "IPv6";
+ spdk_nvme_transport_id_parse_adrfam(adrfam, str);
+ CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_IPV6);
+
+ str = "ipV6";
+ spdk_nvme_transport_id_parse_adrfam(adrfam, str);
+ CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_IPV6);
+
+ /* test adrfam value when use function "strcasecmp" to compare str and "IB",not case-sensitive */
+ str = "IB";
+ spdk_nvme_transport_id_parse_adrfam(adrfam, str);
+ CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_IB);
+
+ str = "ib";
+ spdk_nvme_transport_id_parse_adrfam(adrfam, str);
+ CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_IB);
+
+ /* test adrfam value when use function "strcasecmp" to compare str and "FC",not case-sensitive */
+ str = "FC";
+ spdk_nvme_transport_id_parse_adrfam(adrfam, str);
+ CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_FC);
+
+ str = "fc";
+ spdk_nvme_transport_id_parse_adrfam(adrfam, str);
+ CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_FC);
+
+}
+
+static void
+test_trid_trtype_str(void)
+{
+ const char *s;
+
+ s = spdk_nvme_transport_id_trtype_str(-5);
+ CU_ASSERT(s == NULL);
+
+ s = spdk_nvme_transport_id_trtype_str(SPDK_NVME_TRANSPORT_PCIE);
+ SPDK_CU_ASSERT_FATAL(s != NULL);
+ CU_ASSERT(strcmp(s, "PCIe") == 0);
+
+ s = spdk_nvme_transport_id_trtype_str(SPDK_NVME_TRANSPORT_RDMA);
+ SPDK_CU_ASSERT_FATAL(s != NULL);
+ CU_ASSERT(strcmp(s, "RDMA") == 0);
+
+ s = spdk_nvme_transport_id_trtype_str(SPDK_NVME_TRANSPORT_FC);
+ SPDK_CU_ASSERT_FATAL(s != NULL);
+ CU_ASSERT(strcmp(s, "FC") == 0);
+
+ s = spdk_nvme_transport_id_trtype_str(SPDK_NVME_TRANSPORT_TCP);
+ SPDK_CU_ASSERT_FATAL(s != NULL);
+ CU_ASSERT(strcmp(s, "TCP") == 0);
+}
+
+static void
+test_trid_adrfam_str(void)
+{
+ const char *s;
+
+ s = spdk_nvme_transport_id_adrfam_str(-5);
+ CU_ASSERT(s == NULL);
+
+ s = spdk_nvme_transport_id_adrfam_str(SPDK_NVMF_ADRFAM_IPV4);
+ SPDK_CU_ASSERT_FATAL(s != NULL);
+ CU_ASSERT(strcmp(s, "IPv4") == 0);
+
+ s = spdk_nvme_transport_id_adrfam_str(SPDK_NVMF_ADRFAM_IPV6);
+ SPDK_CU_ASSERT_FATAL(s != NULL);
+ CU_ASSERT(strcmp(s, "IPv6") == 0);
+
+ s = spdk_nvme_transport_id_adrfam_str(SPDK_NVMF_ADRFAM_IB);
+ SPDK_CU_ASSERT_FATAL(s != NULL);
+ CU_ASSERT(strcmp(s, "IB") == 0);
+
+ s = spdk_nvme_transport_id_adrfam_str(SPDK_NVMF_ADRFAM_FC);
+ SPDK_CU_ASSERT_FATAL(s != NULL);
+ CU_ASSERT(strcmp(s, "FC") == 0);
+}
+
+/* stub callback used by the test_nvme_request_check_timeout */
+static bool ut_timeout_cb_call = false;
+static void
+dummy_timeout_cb(void *cb_arg, struct spdk_nvme_ctrlr *ctrlr,
+ struct spdk_nvme_qpair *qpair, uint16_t cid)
+{
+ ut_timeout_cb_call = true;
+}
+
+static void
+test_nvme_request_check_timeout(void)
+{
+ int rc;
+ struct spdk_nvme_qpair qpair;
+ struct nvme_request req;
+ struct spdk_nvme_ctrlr_process active_proc;
+ uint16_t cid = 0;
+ uint64_t now_tick = 0;
+
+ memset(&qpair, 0x0, sizeof(qpair));
+ memset(&req, 0x0, sizeof(req));
+ memset(&active_proc, 0x0, sizeof(active_proc));
+ req.qpair = &qpair;
+ active_proc.timeout_cb_fn = dummy_timeout_cb;
+
+ /* if have called timeout_cb_fn then return directly */
+ req.timed_out = true;
+ rc = nvme_request_check_timeout(&req, cid, &active_proc, now_tick);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(ut_timeout_cb_call == false);
+
+ /* if timeout isn't enabled then return directly */
+ req.timed_out = false;
+ req.submit_tick = 0;
+ rc = nvme_request_check_timeout(&req, cid, &active_proc, now_tick);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(ut_timeout_cb_call == false);
+
+ /* req->pid isn't right then return directly */
+ req.submit_tick = 1;
+ req.pid = g_spdk_nvme_pid + 1;
+ rc = nvme_request_check_timeout(&req, cid, &active_proc, now_tick);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(ut_timeout_cb_call == false);
+
+ /* AER command has no timeout */
+ req.pid = g_spdk_nvme_pid;
+ req.cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
+ rc = nvme_request_check_timeout(&req, cid, &active_proc, now_tick);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(ut_timeout_cb_call == false);
+
+ /* time isn't out */
+ qpair.id = 1;
+ rc = nvme_request_check_timeout(&req, cid, &active_proc, now_tick);
+ CU_ASSERT(rc == 1);
+ CU_ASSERT(ut_timeout_cb_call == false);
+
+ now_tick = 2;
+ rc = nvme_request_check_timeout(&req, cid, &active_proc, now_tick);
+ CU_ASSERT(req.timed_out == true);
+ CU_ASSERT(ut_timeout_cb_call == true);
+ CU_ASSERT(rc == 0);
+}
+
+struct nvme_completion_poll_status g_status;
+uint64_t completion_delay, timeout_in_secs;
+int g_process_comp_result;
+
+int
+spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
+{
+ spdk_delay_us(completion_delay * spdk_get_ticks_hz());
+
+ g_status.done = completion_delay < timeout_in_secs && g_process_comp_result == 0 ? true : false;
+
+ return g_process_comp_result;
+}
+
+static void
+test_nvme_wait_for_completion(void)
+{
+ struct spdk_nvme_qpair qpair;
+ int rc = 0;
+
+ memset(&qpair, 0, sizeof(qpair));
+
+ /* completion timeout */
+ memset(&g_status, 0, sizeof(g_status));
+ completion_delay = 2;
+ timeout_in_secs = 1;
+ rc = nvme_wait_for_completion_timeout(&qpair, &g_status, timeout_in_secs);
+ CU_ASSERT(g_status.timed_out == true);
+ CU_ASSERT(g_status.done == false);
+ CU_ASSERT(rc == -ECANCELED);
+
+ /* spdk_nvme_qpair_process_completions returns error */
+ memset(&g_status, 0, sizeof(g_status));
+ g_process_comp_result = -1;
+ completion_delay = 1;
+ timeout_in_secs = 2;
+ rc = nvme_wait_for_completion_timeout(&qpair, &g_status, timeout_in_secs);
+ CU_ASSERT(rc == -ECANCELED);
+ CU_ASSERT(g_status.timed_out == true);
+ CU_ASSERT(g_status.done == false);
+ CU_ASSERT(g_status.cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(g_status.cpl.status.sc == SPDK_NVME_SC_ABORTED_SQ_DELETION);
+
+ g_process_comp_result = 0;
+
+ /* complete in time */
+ memset(&g_status, 0, sizeof(g_status));
+ completion_delay = 1;
+ timeout_in_secs = 2;
+ rc = nvme_wait_for_completion_timeout(&qpair, &g_status, timeout_in_secs);
+ CU_ASSERT(g_status.timed_out == false);
+ CU_ASSERT(g_status.done == true);
+ CU_ASSERT(rc == 0);
+
+ /* nvme_wait_for_completion */
+ /* spdk_nvme_qpair_process_completions returns error */
+ memset(&g_status, 0, sizeof(g_status));
+ g_process_comp_result = -1;
+ rc = nvme_wait_for_completion(&qpair, &g_status);
+ CU_ASSERT(rc == -ECANCELED);
+ CU_ASSERT(g_status.timed_out == true);
+ CU_ASSERT(g_status.done == false);
+ CU_ASSERT(g_status.cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(g_status.cpl.status.sc == SPDK_NVME_SC_ABORTED_SQ_DELETION);
+
+ /* successful completion */
+ memset(&g_status, 0, sizeof(g_status));
+ g_process_comp_result = 0;
+ rc = nvme_wait_for_completion(&qpair, &g_status);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_status.timed_out == false);
+ CU_ASSERT(g_status.done == true);
+}
+
+static void
+test_nvme_ctrlr_probe_internal(void)
+{
+ struct spdk_nvme_probe_ctx *probe_ctx;
+ struct spdk_nvme_transport_id trid = {};
+ struct nvme_driver dummy;
+ int rc;
+
+ probe_ctx = calloc(1, sizeof(*probe_ctx));
+ CU_ASSERT(probe_ctx != NULL);
+
+ MOCK_SET(spdk_process_is_primary, true);
+ MOCK_SET(spdk_memzone_reserve, (void *)&dummy);
+ g_spdk_nvme_driver = NULL;
+ rc = nvme_driver_init();
+ CU_ASSERT(rc == 0);
+
+ ut_test_probe_internal = true;
+ MOCK_SET(dummy_probe_cb, true);
+ trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
+ nvme_probe_ctx_init(probe_ctx, &trid, NULL, dummy_probe_cb, NULL, NULL);
+ rc = nvme_probe_internal(probe_ctx, false);
+ CU_ASSERT(rc < 0);
+ CU_ASSERT(TAILQ_EMPTY(&probe_ctx->init_ctrlrs));
+
+ free(probe_ctx);
+ ut_test_probe_internal = false;
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvme", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_opc_data_transfer);
+ CU_ADD_TEST(suite, test_spdk_nvme_transport_id_parse_trtype);
+ CU_ADD_TEST(suite, test_spdk_nvme_transport_id_parse_adrfam);
+ CU_ADD_TEST(suite, test_trid_parse_and_compare);
+ CU_ADD_TEST(suite, test_trid_trtype_str);
+ CU_ADD_TEST(suite, test_trid_adrfam_str);
+ CU_ADD_TEST(suite, test_nvme_ctrlr_probe);
+ CU_ADD_TEST(suite, test_spdk_nvme_probe);
+ CU_ADD_TEST(suite, test_spdk_nvme_connect);
+ CU_ADD_TEST(suite, test_nvme_ctrlr_probe_internal);
+ CU_ADD_TEST(suite, test_nvme_init_controllers);
+ CU_ADD_TEST(suite, test_nvme_driver_init);
+ CU_ADD_TEST(suite, test_spdk_nvme_detach);
+ CU_ADD_TEST(suite, test_nvme_completion_poll_cb);
+ CU_ADD_TEST(suite, test_nvme_user_copy_cmd_complete);
+ CU_ADD_TEST(suite, test_nvme_allocate_request_null);
+ CU_ADD_TEST(suite, test_nvme_allocate_request);
+ CU_ADD_TEST(suite, test_nvme_free_request);
+ CU_ADD_TEST(suite, test_nvme_allocate_request_user_copy);
+ CU_ADD_TEST(suite, test_nvme_robust_mutex_init_shared);
+ CU_ADD_TEST(suite, test_nvme_request_check_timeout);
+ CU_ADD_TEST(suite, test_nvme_wait_for_completion);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/.gitignore
new file mode 100644
index 000000000..97a75bee8
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/.gitignore
@@ -0,0 +1 @@
+nvme_ctrlr_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/Makefile
new file mode 100644
index 000000000..3ce33dc4e
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_ctrlr_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/nvme_ctrlr_ut.c b/src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/nvme_ctrlr_ut.c
new file mode 100644
index 000000000..f5b374639
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/nvme_ctrlr_ut.c
@@ -0,0 +1,2150 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation. All rights reserved.
+ * Copyright (c) 2020 Mellanox Technologies LTD. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "spdk_internal/log.h"
+
+#include "common/lib/test_env.c"
+
+struct spdk_log_flag SPDK_LOG_NVME = {
+ .name = "nvme",
+ .enabled = false,
+};
+
+#include "nvme/nvme_ctrlr.c"
+#include "nvme/nvme_quirks.c"
+
+pid_t g_spdk_nvme_pid;
+
+struct nvme_driver _g_nvme_driver = {
+ .lock = PTHREAD_MUTEX_INITIALIZER,
+};
+
+struct nvme_driver *g_spdk_nvme_driver = &_g_nvme_driver;
+
+struct spdk_nvme_registers g_ut_nvme_regs = {};
+
+__thread int nvme_thread_ioq_index = -1;
+
+uint32_t set_size = 1;
+
+int set_status_cpl = -1;
+
+DEFINE_STUB(nvme_ctrlr_cmd_set_host_id, int,
+ (struct spdk_nvme_ctrlr *ctrlr, void *host_id, uint32_t host_id_size,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
+DEFINE_STUB_V(nvme_ns_set_identify_data, (struct spdk_nvme_ns *ns));
+DEFINE_STUB_V(nvme_qpair_abort_reqs, (struct spdk_nvme_qpair *qpair, uint32_t dnr));
+DEFINE_STUB(spdk_nvme_poll_group_remove, int, (struct spdk_nvme_poll_group *group,
+ struct spdk_nvme_qpair *qpair), 0);
+DEFINE_STUB_V(nvme_io_msg_ctrlr_update, (struct spdk_nvme_ctrlr *ctrlr));
+
+struct spdk_nvme_ctrlr *nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
+ const struct spdk_nvme_ctrlr_opts *opts,
+ void *devhandle)
+{
+ return NULL;
+}
+
+int
+nvme_transport_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
+{
+ nvme_ctrlr_destruct_finish(ctrlr);
+
+ return 0;
+}
+
+int
+nvme_transport_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return 0;
+}
+
+int
+nvme_transport_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value)
+{
+ SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 4);
+ *(uint32_t *)((uintptr_t)&g_ut_nvme_regs + offset) = value;
+ return 0;
+}
+
+int
+nvme_transport_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value)
+{
+ SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 8);
+ *(uint64_t *)((uintptr_t)&g_ut_nvme_regs + offset) = value;
+ return 0;
+}
+
+int
+nvme_transport_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value)
+{
+ SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 4);
+ *value = *(uint32_t *)((uintptr_t)&g_ut_nvme_regs + offset);
+ return 0;
+}
+
+int
+nvme_transport_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value)
+{
+ SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 8);
+ *value = *(uint64_t *)((uintptr_t)&g_ut_nvme_regs + offset);
+ return 0;
+}
+
+uint32_t
+nvme_transport_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return UINT32_MAX;
+}
+
+uint16_t
+nvme_transport_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return 1;
+}
+
+void *
+nvme_transport_ctrlr_map_cmb(struct spdk_nvme_ctrlr *ctrlr, size_t *size)
+{
+ return NULL;
+}
+
+int
+nvme_transport_ctrlr_unmap_cmb(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return 0;
+}
+
+struct spdk_nvme_qpair *
+nvme_transport_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
+ const struct spdk_nvme_io_qpair_opts *opts)
+{
+ struct spdk_nvme_qpair *qpair;
+
+ qpair = calloc(1, sizeof(*qpair));
+ SPDK_CU_ASSERT_FATAL(qpair != NULL);
+
+ qpair->ctrlr = ctrlr;
+ qpair->id = qid;
+ qpair->qprio = opts->qprio;
+
+ return qpair;
+}
+
+int
+nvme_transport_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
+{
+ free(qpair);
+ return 0;
+}
+
+void
+nvme_transport_ctrlr_disconnect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
+{
+}
+
+int
+nvme_transport_qpair_reset(struct spdk_nvme_qpair *qpair)
+{
+ return 0;
+}
+
+void
+nvme_transport_admin_qpair_abort_aers(struct spdk_nvme_qpair *qpair)
+{
+}
+
+void
+nvme_transport_qpair_abort_reqs(struct spdk_nvme_qpair *qpair, uint32_t dnr)
+{
+}
+
+int
+nvme_driver_init(void)
+{
+ return 0;
+}
+
+int nvme_qpair_init(struct spdk_nvme_qpair *qpair, uint16_t id,
+ struct spdk_nvme_ctrlr *ctrlr,
+ enum spdk_nvme_qprio qprio,
+ uint32_t num_requests)
+{
+ qpair->id = id;
+ qpair->qprio = qprio;
+ qpair->ctrlr = ctrlr;
+
+ return 0;
+}
+
+static struct spdk_nvme_cpl fake_cpl = {};
+static enum spdk_nvme_generic_command_status_code set_status_code = SPDK_NVME_SC_SUCCESS;
+
+static void
+fake_cpl_sc(spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ fake_cpl.status.sc = set_status_code;
+ cb_fn(cb_arg, &fake_cpl);
+}
+
/*
 * Stub: the tests exercised here never issue Set Features, so reaching this
 * function is itself a test failure.
 */
int
spdk_nvme_ctrlr_cmd_set_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				uint32_t cdw11, uint32_t cdw12, void *payload, uint32_t payload_size,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	CU_ASSERT(0);
	return -1;
}
+
/* Stub: complete Get Features immediately with the canned completion. */
int
spdk_nvme_ctrlr_cmd_get_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				uint32_t cdw11, void *payload, uint32_t payload_size,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	fake_cpl_sc(cb_fn, cb_arg);
	return 0;
}
+
/* Stub: complete Get Log Page immediately with the canned completion. */
int
spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page,
				 uint32_t nsid, void *payload, uint32_t payload_size,
				 uint64_t offset, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	fake_cpl_sc(cb_fn, cb_arg);
	return 0;
}
+
+int
+nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_ASYNC_EVENT_REQUEST);
+
+ /*
+ * For the purposes of this unit test, we don't need to bother emulating request submission.
+ */
+
+ return 0;
+}
+
/* Result that the current test case has staged for the completion stub. */
static int32_t g_wait_for_completion_return_val;

/* Completion stub: return whatever result the test staged above. */
int32_t
spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
{
	(void)qpair;
	(void)max_completions;

	return g_wait_for_completion_return_val;
}
+
/* Stub: there are no queued error requests to complete in the unit test. */
void
nvme_qpair_complete_error_reqs(struct spdk_nvme_qpair *qpair)
{
	(void)qpair;
}
+
+
+void
+nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl)
+{
+ struct nvme_completion_poll_status *status = arg;
+ /* This should not happen it test env since this callback is always called
+ * before wait_for_completion_* while this field can only be set to true in
+ * wait_for_completion_* functions */
+ CU_ASSERT(status->timed_out == false);
+
+ status->cpl = *cpl;
+ status->done = true;
+}
+
/* Status object of the most recent simulated completion failure. */
static struct nvme_completion_poll_status *g_failed_status;

/*
 * Test double: decide the command outcome synchronously instead of polling.
 * A negative return from the (stubbed) process_completions call simulates a
 * timeout; otherwise the command completes immediately.  The file-scope
 * set_status_cpl flag (armed by the fw_commit stub below) injects an error
 * status into the completion.  robust_mutex is ignored in the test env.
 */
int
nvme_wait_for_completion_robust_lock(
	struct spdk_nvme_qpair *qpair,
	struct nvme_completion_poll_status *status,
	pthread_mutex_t *robust_mutex)
{
	if (spdk_nvme_qpair_process_completions(qpair, 0) < 0) {
		g_failed_status = status;
		status->timed_out = true;
		return -1;
	}

	status->done = true;
	if (set_status_cpl == 1) {
		status->cpl.status.sc = 1;
	}
	return spdk_nvme_cpl_is_error(&status->cpl) ? -EIO : 0;
}
+
/* Lock-free variant: delegate to the robust-lock double with no mutex. */
int
nvme_wait_for_completion(struct spdk_nvme_qpair *qpair,
			 struct nvme_completion_poll_status *status)
{
	return nvme_wait_for_completion_robust_lock(qpair, status, NULL);
}
+
/*
 * Timed variant: the timeout is deliberately ignored in the test environment;
 * timeouts are simulated via the staged process_completions return value.
 */
int
nvme_wait_for_completion_timeout(struct spdk_nvme_qpair *qpair,
				 struct nvme_completion_poll_status *status,
				 uint64_t timeout_in_secs)
{
	return nvme_wait_for_completion_robust_lock(qpair, status, NULL);
}
+
/* Stub: complete the async-event-config command immediately. */
int
nvme_ctrlr_cmd_set_async_event_config(struct spdk_nvme_ctrlr *ctrlr,
				      union spdk_nvme_feat_async_event_configuration config, spdk_nvme_cmd_cb cb_fn,
				      void *cb_arg)
{
	fake_cpl_sc(cb_fn, cb_arg);
	return 0;
}
+
+int
+nvme_ctrlr_cmd_identify(struct spdk_nvme_ctrlr *ctrlr, uint8_t cns, uint16_t cntid, uint32_t nsid,
+ void *payload, size_t payload_size,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ if (cns == SPDK_NVME_IDENTIFY_ACTIVE_NS_LIST) {
+ uint32_t count = 0;
+ uint32_t i = 0;
+ struct spdk_nvme_ns_list *ns_list = (struct spdk_nvme_ns_list *)payload;
+
+ for (i = 1; i <= ctrlr->num_ns; i++) {
+ if (i <= nsid) {
+ continue;
+ }
+
+ ns_list->ns_list[count++] = i;
+ if (count == SPDK_COUNTOF(ns_list->ns_list)) {
+ break;
+ }
+ }
+
+ }
+
+ fake_cpl_sc(cb_fn, cb_arg);
+ return 0;
+}
+
/* Stub: complete the set-num-queues command immediately. */
int
nvme_ctrlr_cmd_set_num_queues(struct spdk_nvme_ctrlr *ctrlr,
			      uint32_t num_queues, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	fake_cpl_sc(cb_fn, cb_arg);
	return 0;
}
+
/* Stub: get-num-queues is never expected in these tests; flag if reached. */
int
nvme_ctrlr_cmd_get_num_queues(struct spdk_nvme_ctrlr *ctrlr,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	CU_ASSERT(0);
	return -1;
}
+
/* Stub: attach-namespace succeeds without invoking the callback. */
int
nvme_ctrlr_cmd_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}
+
/* Stub: detach-namespace succeeds without invoking the callback. */
int
nvme_ctrlr_cmd_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}
+
/* Stub: create-namespace succeeds without invoking the callback. */
int
nvme_ctrlr_cmd_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload,
			 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}
+
/* Stub: delete-namespace succeeds without invoking the callback. */
int
nvme_ctrlr_cmd_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, spdk_nvme_cmd_cb cb_fn,
			 void *cb_arg)
{
	return 0;
}
+
/* Stub: format succeeds without invoking the callback. */
int
nvme_ctrlr_cmd_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, struct spdk_nvme_format *format,
		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}
+
/*
 * Firmware-commit stub.  Only the "replace image" commit action is expected.
 * A firmware slot (fs) of 0 is treated as invalid input.  On success the stub
 * arms set_status_cpl so the subsequent completion carries an error status —
 * unless the controller is resetting, in which case the flag is cleared.
 */
int
nvme_ctrlr_cmd_fw_commit(struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_fw_commit *fw_commit,
			 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	CU_ASSERT(fw_commit->ca == SPDK_NVME_FW_COMMIT_REPLACE_IMG);
	if (fw_commit->fs == 0) {
		return -1;
	}
	set_status_cpl = 1;
	if (ctrlr->is_resetting == true) {
		set_status_cpl = 0;
	}
	return 0;
}
+
+int
+nvme_ctrlr_cmd_fw_image_download(struct spdk_nvme_ctrlr *ctrlr,
+ uint32_t size, uint32_t offset, void *payload,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ if ((size != 0 && payload == NULL) || (size == 0 && payload != NULL)) {
+ return -1;
+ }
+ CU_ASSERT(offset == 0);
+ return 0;
+}
+
/* Namespace stub: nothing to destroy in the unit-test environment. */
void
nvme_ns_destruct(struct spdk_nvme_ns *ns)
{
	(void)ns;
}
+
/* Namespace stub: construction always succeeds without touching the ns. */
int
nvme_ns_construct(struct spdk_nvme_ns *ns, uint32_t id,
		  struct spdk_nvme_ctrlr *ctrlr)
{
	(void)ns;
	(void)id;
	(void)ctrlr;

	return 0;
}
+
/* Namespace stub: updating namespace data always succeeds. */
int
nvme_ns_update(struct spdk_nvme_ns *ns)
{
	(void)ns;

	return 0;
}
+
/* PCI stub: no real device to detach in the unit test. */
void
spdk_pci_device_detach(struct spdk_pci_device *device)
{
	(void)device;
}
+
/*
 * Declare a zeroed controller plus an admin queue with a single free request,
 * and wire them together.  Used at the top of each state-machine test so the
 * init path has an admin queue to pull requests from.
 */
#define DECLARE_AND_CONSTRUCT_CTRLR()	\
	struct spdk_nvme_ctrlr	ctrlr = {};	\
	struct spdk_nvme_qpair	adminq = {};	\
	struct nvme_request	req;		\
						\
	STAILQ_INIT(&adminq.free_req);		\
	STAILQ_INSERT_HEAD(&adminq.free_req, &req, stailq);	\
	ctrlr.adminq = &adminq;
+
/*
 * Init state machine starting from CC.EN = 1, CSTS.RDY = 0: init must first
 * wait for RDY to rise, disable the controller (CC.EN = 0), wait for RDY to
 * fall, then re-enable and drive the controller to READY.
 */
static void
test_nvme_ctrlr_init_en_1_rdy_0(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 1, CSTS.RDY = 0
	 */
	g_ut_nvme_regs.cc.bits.en = 1;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1);

	/*
	 * Transition to CSTS.RDY = 1.
	 * init() should set CC.EN = 0.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Transition to CSTS.RDY = 0.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 0;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);

	/*
	 * Transition to CC.EN = 1
	 */
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	/* Fake a completed shutdown so destruct does not spin. */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}
+
/*
 * Init state machine starting from CC.EN = 1, CSTS.RDY = 1: the controller is
 * already running, so init can disable it immediately, wait for RDY to fall,
 * then re-enable and drive it to READY.
 */
static void
test_nvme_ctrlr_init_en_1_rdy_1(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 1, CSTS.RDY = 1
	 * init() should set CC.EN = 0.
	 */
	g_ut_nvme_regs.cc.bits.en = 1;
	g_ut_nvme_regs.csts.bits.rdy = 1;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Transition to CSTS.RDY = 0.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 0;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);

	/*
	 * Transition to CC.EN = 1
	 */
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	/* Fake a completed shutdown so destruct does not spin. */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}
+
/*
 * Arbitration-mechanism selection when CAP.AMS advertises only the default
 * round-robin: RR must be accepted (CC.AMS = RR), while WRR, VS, and
 * out-of-range values must fail init and leave the controller disabled.
 */
static void
test_nvme_ctrlr_init_en_0_rdy_0_ams_rr(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 0
	 * init() should set CC.EN = 1.
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Default round robin enabled
	 */
	g_ut_nvme_regs.cap.bits.ams = 0x0;
	ctrlr.cap = g_ut_nvme_regs.cap;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	/*
	 * Case 1: default round robin arbitration mechanism selected
	 */
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 2: weighted round robin arbitration mechanism selected
	 * (unsupported by CAP.AMS, so enable must fail)
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 3: vendor specific arbitration mechanism selected
	 * (unsupported by CAP.AMS, so enable must fail)
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 4: invalid arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 5: reset to default round robin arbitration mechanism
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}
+
/*
 * Arbitration-mechanism selection when CAP.AMS advertises weighted round
 * robin: both RR and WRR must be accepted, while VS and out-of-range values
 * must fail init and leave the controller disabled.
 */
static void
test_nvme_ctrlr_init_en_0_rdy_0_ams_wrr(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 0
	 * init() should set CC.EN = 1.
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Weighted round robin enabled
	 */
	g_ut_nvme_regs.cap.bits.ams = SPDK_NVME_CAP_AMS_WRR;
	ctrlr.cap = g_ut_nvme_regs.cap;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	/*
	 * Case 1: default round robin arbitration mechanism selected
	 */
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 2: weighted round robin arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_WRR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_WRR);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 3: vendor specific arbitration mechanism selected
	 * (unsupported by CAP.AMS, so enable must fail)
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 4: invalid arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 5: reset to weighted round robin arbitration mechanism
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_WRR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_WRR);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}
/*
 * Arbitration-mechanism selection when CAP.AMS advertises vendor specific:
 * RR and VS must be accepted, while WRR and out-of-range values must fail
 * init and leave the controller disabled.
 */
static void
test_nvme_ctrlr_init_en_0_rdy_0_ams_vs(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 0
	 * init() should set CC.EN = 1.
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Vendor specific arbitration supported
	 */
	g_ut_nvme_regs.cap.bits.ams = SPDK_NVME_CAP_AMS_VS;
	ctrlr.cap = g_ut_nvme_regs.cap;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	/*
	 * Case 1: default round robin arbitration mechanism selected
	 */
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 2: weighted round robin arbitration mechanism selected
	 * (unsupported by CAP.AMS, so enable must fail)
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 3: vendor specific arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_VS);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_VS);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 4: invalid arbitration mechanism selected
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Complete and destroy the controller
	 */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);

	/*
	 * Reset to initial state
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	/*
	 * Case 5: reset to vendor specific arbitration mechanism
	 */
	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;

	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_VS);
	CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_VS);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}
+
/*
 * Init state machine starting from CC.EN = 0, CSTS.RDY = 0: the controller is
 * already disabled, so init enables it directly and drives it to READY.
 */
static void
test_nvme_ctrlr_init_en_0_rdy_0(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 0
	 * init() should set CC.EN = 1.
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);

	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);

	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	/* Fake a completed shutdown so destruct does not spin. */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}
+
/*
 * Init state machine starting from CC.EN = 0, CSTS.RDY = 1 (inconsistent HW
 * state): init must wait for RDY to fall before enabling the controller and
 * driving it to READY.
 */
static void
test_nvme_ctrlr_init_en_0_rdy_1(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 0, CSTS.RDY = 1
	 */
	g_ut_nvme_regs.cc.bits.en = 0;
	g_ut_nvme_regs.csts.bits.rdy = 1;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);

	/*
	 * Transition to CSTS.RDY = 0.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 0;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);

	/*
	 * Transition to CC.EN = 1
	 */
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	/* Fake a completed shutdown so destruct does not spin. */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}
+
/*
 * Prepare a constructed controller for I/O qpair allocation tests: set up the
 * lock and page size, allow num_io_queues queues, and mark qids 1..n free
 * (qid 0 is the admin queue and stays cleared).
 */
static void
setup_qpairs(struct spdk_nvme_ctrlr *ctrlr, uint32_t num_io_queues)
{
	uint32_t i;

	CU_ASSERT(pthread_mutex_init(&ctrlr->ctrlr_lock, NULL) == 0);

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(ctrlr) == 0);

	ctrlr->page_size = 0x1000;
	ctrlr->opts.num_io_queues = num_io_queues;
	ctrlr->free_io_qids = spdk_bit_array_create(num_io_queues + 1);
	SPDK_CU_ASSERT_FATAL(ctrlr->free_io_qids != NULL);

	/* qid 0 is reserved for the admin queue. */
	spdk_bit_array_clear(ctrlr->free_io_qids, 0);
	for (i = 1; i <= num_io_queues; i++) {
		spdk_bit_array_set(ctrlr->free_io_qids, i);
	}
}
+
/* Tear down a controller prepared by setup_qpairs(). */
static void
cleanup_qpairs(struct spdk_nvme_ctrlr *ctrlr)
{
	nvme_ctrlr_destruct(ctrlr);
}
+
/*
 * I/O qpair allocation under default round-robin arbitration: with one queue
 * available, allocation succeeds once, fails when exhausted, succeeds again
 * after freeing, and rejects any nonzero qprio.
 */
static void
test_alloc_io_qpair_rr_1(void)
{
	struct spdk_nvme_io_qpair_opts opts;
	struct spdk_nvme_ctrlr ctrlr = {};
	struct spdk_nvme_qpair *q0;

	setup_qpairs(&ctrlr, 1);

	/*
	 * Fake to simulate the controller with default round robin
	 * arbitration mechanism.
	 */
	g_ut_nvme_regs.cc.bits.ams = SPDK_NVME_CC_AMS_RR;

	spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));

	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, NULL, 0);
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
	/* Only 1 I/O qpair was allocated, so this should fail */
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, NULL, 0) == NULL);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);

	/*
	 * Now that the qpair has been returned to the free list,
	 * we should be able to allocate it again.
	 */
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, NULL, 0);
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);

	/* Only 0 qprio is acceptable for default round robin arbitration mechanism */
	opts.qprio = 1;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 == NULL);

	opts.qprio = 2;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 == NULL);

	opts.qprio = 3;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 == NULL);

	/* Only 0 ~ 3 qprio is acceptable */
	opts.qprio = 4;
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts)) == NULL);

	cleanup_qpairs(&ctrlr);
}
+
/*
 * I/O qpair allocation under weighted round-robin arbitration with two
 * queues: all priorities 0-3 are accepted, qpairs can be freed in either
 * order, and qprio values above 3 are rejected.
 */
static void
test_alloc_io_qpair_wrr_1(void)
{
	struct spdk_nvme_io_qpair_opts opts;
	struct spdk_nvme_ctrlr ctrlr = {};
	struct spdk_nvme_qpair *q0, *q1;

	setup_qpairs(&ctrlr, 2);

	/*
	 * Fake to simulate the controller with weighted round robin
	 * arbitration mechanism.
	 */
	g_ut_nvme_regs.cc.bits.ams = SPDK_NVME_CC_AMS_WRR;

	spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));

	/*
	 * Allocate 2 qpairs and free them
	 */
	opts.qprio = 0;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 0);

	opts.qprio = 1;
	q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q1 != NULL);
	SPDK_CU_ASSERT_FATAL(q1->qprio == 1);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);

	/*
	 * Allocate 2 qpairs and free them in the reverse order
	 */
	opts.qprio = 2;
	q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q0 != NULL);
	SPDK_CU_ASSERT_FATAL(q0->qprio == 2);

	opts.qprio = 3;
	q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
	SPDK_CU_ASSERT_FATAL(q1 != NULL);
	SPDK_CU_ASSERT_FATAL(q1->qprio == 3);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);

	/* Only 0 ~ 3 qprio is acceptable */
	opts.qprio = 4;
	SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts)) == NULL);

	cleanup_qpairs(&ctrlr);
}
+
+/*
+ * WRR allocation with a pool of exactly 4 I/O qpairs: verify the pool is
+ * exhausted after 4 allocations, that freed qpairs can be re-allocated,
+ * and that multiple qpairs may share the same qprio value.
+ */
+static void
+test_alloc_io_qpair_wrr_2(void)
+{
+ struct spdk_nvme_io_qpair_opts opts;
+ struct spdk_nvme_ctrlr ctrlr = {};
+ struct spdk_nvme_qpair *q0, *q1, *q2, *q3;
+
+ setup_qpairs(&ctrlr, 4);
+
+ /*
+ * Fake to simulate the controller with weighted round robin
+ * arbitration mechanism.
+ */
+ g_ut_nvme_regs.cc.bits.ams = SPDK_NVME_CC_AMS_WRR;
+
+ spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));
+
+ opts.qprio = 0;
+ q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q0 != NULL);
+ SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
+
+ opts.qprio = 1;
+ q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q1 != NULL);
+ SPDK_CU_ASSERT_FATAL(q1->qprio == 1);
+
+ opts.qprio = 2;
+ q2 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q2 != NULL);
+ SPDK_CU_ASSERT_FATAL(q2->qprio == 2);
+
+ opts.qprio = 3;
+ q3 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q3 != NULL);
+ SPDK_CU_ASSERT_FATAL(q3->qprio == 3);
+
+ /* Only 4 I/O qpairs was allocated, so this should fail */
+ opts.qprio = 0;
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts)) == NULL);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q3) == 0);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q2) == 0);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
+
+ /*
+ * Now that the qpair has been returned to the free list,
+ * we should be able to allocate it again.
+ *
+ * Allocate 4 I/O qpairs and half of them with same qprio.
+ */
+ opts.qprio = 1;
+ q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q0 != NULL);
+ SPDK_CU_ASSERT_FATAL(q0->qprio == 1);
+
+ opts.qprio = 1;
+ q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q1 != NULL);
+ SPDK_CU_ASSERT_FATAL(q1->qprio == 1);
+
+ opts.qprio = 3;
+ q2 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q2 != NULL);
+ SPDK_CU_ASSERT_FATAL(q2->qprio == 3);
+
+ opts.qprio = 3;
+ q3 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q3 != NULL);
+ SPDK_CU_ASSERT_FATAL(q3->qprio == 3);
+
+ /*
+ * Free all I/O qpairs in reverse order
+ */
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q2) == 0);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q3) == 0);
+
+ cleanup_qpairs(&ctrlr);
+}
+
+/* Transport-layer stub: records that a connect was attempted and returns
+ * the injected return code, letting tests both detect whether the
+ * transport was reached and simulate connect failures. */
+bool g_connect_qpair_called = false;
+int g_connect_qpair_return_code = 0;
+int nvme_transport_ctrlr_connect_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
+{
+ g_connect_qpair_called = true;
+ return g_connect_qpair_return_code;
+}
+
+/*
+ * Verify the error mapping of spdk_nvme_ctrlr_reconnect_io_qpair() for the
+ * controller states (removed -> -ENODEV, resetting -> -EAGAIN,
+ * failed -> -ENXIO), the precedence between those states, and that the
+ * transport connect stub is only reached when the qpair is disconnected.
+ *
+ * Fix vs. original: every CU_ASSERT() statement is now properly terminated
+ * with a semicolon; the originals compiled only because CUnit's CU_ASSERT
+ * expands to a braced block.
+ */
+static void
+test_spdk_nvme_ctrlr_reconnect_io_qpair(void)
+{
+ struct spdk_nvme_ctrlr ctrlr = {};
+ struct spdk_nvme_qpair qpair = {};
+ int rc;
+
+ /* Various states of controller disconnect. */
+ qpair.id = 1;
+ qpair.ctrlr = &ctrlr;
+ ctrlr.is_removed = 1;
+ ctrlr.is_failed = 0;
+ ctrlr.is_resetting = 0;
+ rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
+ CU_ASSERT(rc == -ENODEV);
+
+ ctrlr.is_removed = 0;
+ ctrlr.is_failed = 1;
+ rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
+ CU_ASSERT(rc == -ENXIO);
+
+ ctrlr.is_failed = 0;
+ ctrlr.is_resetting = 1;
+ rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
+ CU_ASSERT(rc == -EAGAIN);
+
+ /* Confirm precedence for controller states: removed > resetting > failed */
+ ctrlr.is_removed = 1;
+ ctrlr.is_failed = 1;
+ rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
+ CU_ASSERT(rc == -ENODEV);
+
+ ctrlr.is_removed = 0;
+ rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
+ CU_ASSERT(rc == -EAGAIN);
+
+ ctrlr.is_resetting = 0;
+ rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
+ CU_ASSERT(rc == -ENXIO);
+
+ /* qpair not failed. Make sure we don't call down to the transport */
+ ctrlr.is_failed = 0;
+ qpair.state = NVME_QPAIR_CONNECTED;
+ g_connect_qpair_called = false;
+ rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
+ CU_ASSERT(g_connect_qpair_called == false);
+ CU_ASSERT(rc == 0);
+
+ /* transport qpair is failed. make sure we call down to the transport */
+ qpair.state = NVME_QPAIR_DISCONNECTED;
+ rc = spdk_nvme_ctrlr_reconnect_io_qpair(&qpair);
+ CU_ASSERT(g_connect_qpair_called == true);
+ CU_ASSERT(rc == 0);
+}
+
+/* nvme_ctrlr_fail() with hot_remove == false must mark the controller
+ * as failed. */
+static void
+test_nvme_ctrlr_fail(void)
+{
+ struct spdk_nvme_ctrlr ctrlr = {};
+
+ ctrlr.opts.num_io_queues = 0;
+ nvme_ctrlr_fail(&ctrlr, false);
+
+ CU_ASSERT(ctrlr.is_failed == true);
+}
+
+/*
+ * Verify which Intel vendor-specific log pages are advertised as supported
+ * for three PCI identities: a non-Intel device (none), a generic Intel
+ * device (directory + temperature per the payload), and a specific Intel
+ * SKU whose quirks enable the read-command-latency page.
+ */
+static void
+test_nvme_ctrlr_construct_intel_support_log_page_list(void)
+{
+ bool res;
+ struct spdk_nvme_ctrlr ctrlr = {};
+ struct spdk_nvme_intel_log_page_directory payload = {};
+ struct spdk_pci_id pci_id = {};
+
+ /* Get quirks for a device with all 0 vendor/device id */
+ ctrlr.quirks = nvme_get_quirks(&pci_id);
+ CU_ASSERT(ctrlr.quirks == 0);
+
+ nvme_ctrlr_construct_intel_support_log_page_list(&ctrlr, &payload);
+ res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE);
+ CU_ASSERT(res == false);
+
+ /* Set the vendor to Intel, but provide no device id */
+ pci_id.class_id = SPDK_PCI_CLASS_NVME;
+ ctrlr.cdata.vid = pci_id.vendor_id = SPDK_PCI_VID_INTEL;
+ payload.temperature_statistics_log_len = 1;
+ ctrlr.quirks = nvme_get_quirks(&pci_id);
+ memset(ctrlr.log_page_supported, 0, sizeof(ctrlr.log_page_supported));
+
+ nvme_ctrlr_construct_intel_support_log_page_list(&ctrlr, &payload);
+ res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY);
+ CU_ASSERT(res == true);
+ res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE);
+ CU_ASSERT(res == true);
+ res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY);
+ CU_ASSERT(res == false);
+ res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_SMART);
+ CU_ASSERT(res == false);
+
+ /* set valid vendor id, device id and sub device id */
+ ctrlr.cdata.vid = SPDK_PCI_VID_INTEL;
+ payload.temperature_statistics_log_len = 0;
+ pci_id.vendor_id = SPDK_PCI_VID_INTEL;
+ pci_id.device_id = 0x0953;
+ pci_id.subvendor_id = SPDK_PCI_VID_INTEL;
+ pci_id.subdevice_id = 0x3702;
+ ctrlr.quirks = nvme_get_quirks(&pci_id);
+ memset(ctrlr.log_page_supported, 0, sizeof(ctrlr.log_page_supported));
+
+ nvme_ctrlr_construct_intel_support_log_page_list(&ctrlr, &payload);
+ res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY);
+ CU_ASSERT(res == true);
+ res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE);
+ CU_ASSERT(res == false);
+ res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY);
+ CU_ASSERT(res == true);
+ res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_SMART);
+ CU_ASSERT(res == false);
+}
+
+/* Standard NVMe features must be reported as supported for any vendor id;
+ * Intel vendor-specific features only when cdata.vid is Intel. */
+static void
+test_nvme_ctrlr_set_supported_features(void)
+{
+ bool res;
+ struct spdk_nvme_ctrlr ctrlr = {};
+
+ /* set a invalid vendor id */
+ ctrlr.cdata.vid = 0xFFFF;
+ nvme_ctrlr_set_supported_features(&ctrlr);
+ res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_FEAT_ARBITRATION);
+ CU_ASSERT(res == true);
+ res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_INTEL_FEAT_MAX_LBA);
+ CU_ASSERT(res == false);
+
+ ctrlr.cdata.vid = SPDK_PCI_VID_INTEL;
+ nvme_ctrlr_set_supported_features(&ctrlr);
+ res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_FEAT_ARBITRATION);
+ CU_ASSERT(res == true);
+ res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_INTEL_FEAT_MAX_LBA);
+ CU_ASSERT(res == true);
+}
+
+/*
+ * spdk_nvme_ctrlr_get_default_ctrlr_opts() must only initialize the fields
+ * that fit within the caller-supplied opts_size: with a truncated size (8
+ * bytes) only the leading fields get defaults, while a full-sized struct
+ * gets every default including the hostnqn derived from the driver's
+ * default extended host id.
+ */
+static void
+test_ctrlr_get_default_ctrlr_opts(void)
+{
+ struct spdk_nvme_ctrlr_opts opts = {};
+
+ CU_ASSERT(spdk_uuid_parse(&g_spdk_nvme_driver->default_extended_host_id,
+ "e53e9258-c93b-48b5-be1a-f025af6d232a") == 0);
+
+ memset(&opts, 0, sizeof(opts));
+
+ /* set a smaller opts_size */
+ CU_ASSERT(sizeof(opts) > 8);
+ spdk_nvme_ctrlr_get_default_ctrlr_opts(&opts, 8);
+ CU_ASSERT_EQUAL(opts.num_io_queues, DEFAULT_MAX_IO_QUEUES);
+ CU_ASSERT_TRUE(opts.use_cmb_sqs);
+ /* check below fields are not initialized by default value */
+ CU_ASSERT_EQUAL(opts.arb_mechanism, 0);
+ CU_ASSERT_EQUAL(opts.keep_alive_timeout_ms, 0);
+ CU_ASSERT_EQUAL(opts.io_queue_size, 0);
+ CU_ASSERT_EQUAL(opts.io_queue_requests, 0);
+ for (int i = 0; i < 8; i++) {
+ CU_ASSERT(opts.host_id[i] == 0);
+ }
+ for (int i = 0; i < 16; i++) {
+ CU_ASSERT(opts.extended_host_id[i] == 0);
+ }
+ CU_ASSERT(strlen(opts.hostnqn) == 0);
+ CU_ASSERT(strlen(opts.src_addr) == 0);
+ CU_ASSERT(strlen(opts.src_svcid) == 0);
+ CU_ASSERT_EQUAL(opts.admin_timeout_ms, 0);
+
+ /* set a consistent opts_size */
+ spdk_nvme_ctrlr_get_default_ctrlr_opts(&opts, sizeof(opts));
+ CU_ASSERT_EQUAL(opts.num_io_queues, DEFAULT_MAX_IO_QUEUES);
+ CU_ASSERT_TRUE(opts.use_cmb_sqs);
+ CU_ASSERT_EQUAL(opts.arb_mechanism, SPDK_NVME_CC_AMS_RR);
+ CU_ASSERT_EQUAL(opts.keep_alive_timeout_ms, 10 * 1000);
+ CU_ASSERT_EQUAL(opts.io_queue_size, DEFAULT_IO_QUEUE_SIZE);
+ CU_ASSERT_EQUAL(opts.io_queue_requests, DEFAULT_IO_QUEUE_REQUESTS);
+ for (int i = 0; i < 8; i++) {
+ CU_ASSERT(opts.host_id[i] == 0);
+ }
+ CU_ASSERT_STRING_EQUAL(opts.hostnqn,
+ "2014-08.org.nvmexpress:uuid:e53e9258-c93b-48b5-be1a-f025af6d232a");
+ CU_ASSERT(memcmp(opts.extended_host_id, &g_spdk_nvme_driver->default_extended_host_id,
+ sizeof(opts.extended_host_id)) == 0);
+ CU_ASSERT(strlen(opts.src_addr) == 0);
+ CU_ASSERT(strlen(opts.src_svcid) == 0);
+ CU_ASSERT_EQUAL(opts.admin_timeout_ms, NVME_MAX_ADMIN_TIMEOUT_IN_SECS * 1000);
+}
+
+/* Same opts_size truncation contract as the ctrlr-opts test, but for
+ * spdk_nvme_ctrlr_get_default_io_qpair_opts(): fields beyond the supplied
+ * size must be left untouched. */
+static void
+test_ctrlr_get_default_io_qpair_opts(void)
+{
+ struct spdk_nvme_ctrlr ctrlr = {};
+ struct spdk_nvme_io_qpair_opts opts = {};
+
+ memset(&opts, 0, sizeof(opts));
+
+ /* set a smaller opts_size */
+ ctrlr.opts.io_queue_size = DEFAULT_IO_QUEUE_SIZE;
+ CU_ASSERT(sizeof(opts) > 8);
+ spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, 8);
+ CU_ASSERT_EQUAL(opts.qprio, SPDK_NVME_QPRIO_URGENT);
+ CU_ASSERT_EQUAL(opts.io_queue_size, DEFAULT_IO_QUEUE_SIZE);
+ /* check below field is not initialized by default value */
+ CU_ASSERT_EQUAL(opts.io_queue_requests, 0);
+
+ /* set a consistent opts_size */
+ ctrlr.opts.io_queue_size = DEFAULT_IO_QUEUE_SIZE;
+ ctrlr.opts.io_queue_requests = DEFAULT_IO_QUEUE_REQUESTS;
+ spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));
+ CU_ASSERT_EQUAL(opts.qprio, SPDK_NVME_QPRIO_URGENT);
+ CU_ASSERT_EQUAL(opts.io_queue_size, DEFAULT_IO_QUEUE_SIZE);
+ CU_ASSERT_EQUAL(opts.io_queue_requests, DEFAULT_IO_QUEUE_REQUESTS);
+}
+
+#if 0 /* TODO: move to PCIe-specific unit test */
+/* Compiled out (see TODO above): exercises controller memory buffer
+ * allocation alignment and exhaustion; kept for reference until a
+ * PCIe-specific unit test exists. */
+static void
+test_nvme_ctrlr_alloc_cmb(void)
+{
+ int rc;
+ uint64_t offset;
+ struct spdk_nvme_ctrlr ctrlr = {};
+
+ ctrlr.cmb_size = 0x1000000;
+ ctrlr.cmb_current_offset = 0x100;
+ rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x200, 0x1000, &offset);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(offset == 0x1000);
+ CU_ASSERT(ctrlr.cmb_current_offset == 0x1200);
+
+ rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x800, 0x1000, &offset);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(offset == 0x2000);
+ CU_ASSERT(ctrlr.cmb_current_offset == 0x2800);
+
+ rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x800000, 0x100000, &offset);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(offset == 0x100000);
+ CU_ASSERT(ctrlr.cmb_current_offset == 0x900000);
+
+ rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x8000000, 0x1000, &offset);
+ CU_ASSERT(rc == -1);
+}
+#endif
+
+/*
+ * Drive spdk_nvme_ctrlr_update_firmware() through its failure and success
+ * paths by steering the mocked completion path via the test globals
+ * set_size, set_status_cpl and g_wait_for_completion_return_val.
+ */
+static void
+test_spdk_nvme_ctrlr_update_firmware(void)
+{
+ struct spdk_nvme_ctrlr ctrlr = {};
+ void *payload = NULL;
+ int point_payload = 1;
+ int slot = 0;
+ int ret = 0;
+ struct spdk_nvme_status status;
+ enum spdk_nvme_fw_commit_action commit_action = SPDK_NVME_FW_COMMIT_REPLACE_IMG;
+
+ /* Set invalid size check function return value */
+ set_size = 5;
+ ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
+ CU_ASSERT(ret == -1);
+
+ /* When payload is NULL but set_size < min_page_size */
+ set_size = 4;
+ ctrlr.min_page_size = 5;
+ ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
+ CU_ASSERT(ret == -1);
+
+ /* When payload not NULL but min_page_size is 0 */
+ set_size = 4;
+ ctrlr.min_page_size = 0;
+ payload = &point_payload;
+ ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
+ CU_ASSERT(ret == -1);
+
+ /* Check firmware image download when payload not NULL and min_page_size not 0 , status.cpl value is 1 */
+ set_status_cpl = 1;
+ set_size = 4;
+ ctrlr.min_page_size = 5;
+ payload = &point_payload;
+ ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
+ CU_ASSERT(ret == -ENXIO);
+
+ /* Check firmware image download and set status.cpl value is 0 */
+ set_status_cpl = 0;
+ set_size = 4;
+ ctrlr.min_page_size = 5;
+ payload = &point_payload;
+ ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
+ CU_ASSERT(ret == -1);
+
+ /* Check firmware commit */
+ ctrlr.is_resetting = false;
+ set_status_cpl = 0;
+ slot = 1;
+ set_size = 4;
+ ctrlr.min_page_size = 5;
+ payload = &point_payload;
+ ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
+ CU_ASSERT(ret == -ENXIO);
+
+ /* Set size check firmware download and firmware commit */
+ ctrlr.is_resetting = true;
+ set_status_cpl = 0;
+ slot = 1;
+ set_size = 4;
+ ctrlr.min_page_size = 5;
+ payload = &point_payload;
+ ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
+ CU_ASSERT(ret == 0);
+
+ /* nvme_wait_for_completion returns an error */
+ g_wait_for_completion_return_val = -1;
+ ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
+ CU_ASSERT(ret == -ENXIO);
+ CU_ASSERT(g_failed_status != NULL);
+ CU_ASSERT(g_failed_status->timed_out == true);
+ /* status should be freed by callback, which is not triggered in test env.
+ Store status to global variable and free it manually.
+ If spdk_nvme_ctrlr_update_firmware changes its behaviour and frees the status
+ itself, we'll get a double free here.. */
+ free(g_failed_status);
+ g_failed_status = NULL;
+ g_wait_for_completion_return_val = 0;
+
+ set_status_cpl = 0;
+}
+
+/* Stub: complete the doorbell buffer config admin command immediately with
+ * a fake successful completion. */
+int
+nvme_ctrlr_cmd_doorbell_buffer_config(struct spdk_nvme_ctrlr *ctrlr, uint64_t prp1, uint64_t prp2,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ fake_cpl_sc(cb_fn, cb_arg);
+ return 0;
+}
+
+/* Doorbell buffer config must succeed on a PCIe controller that advertises
+ * the capability; the paired free releases the allocated buffers. */
+static void
+test_spdk_nvme_ctrlr_doorbell_buffer_config(void)
+{
+ struct spdk_nvme_ctrlr ctrlr = {};
+ int ret = -1;
+
+ ctrlr.cdata.oacs.doorbell_buffer_config = 1;
+ ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
+ ctrlr.page_size = 0x1000;
+ /* use real allocations rather than mocked returns */
+ MOCK_CLEAR(spdk_malloc);
+ MOCK_CLEAR(spdk_zmalloc);
+ ret = nvme_ctrlr_set_doorbell_buffer_config(&ctrlr);
+ CU_ASSERT(ret == 0);
+ nvme_ctrlr_free_doorbell_buffer(&ctrlr);
+}
+
+/*
+ * Exercise the active namespace list across NVMe versions 1.0/1.1/1.2:
+ * identify-active-ns population, is_active_ns bounds behavior, and
+ * first/next active namespace iteration.
+ *
+ * Fix vs. original: the memset that clears active_ns_list passed
+ * ctrlr.num_ns as a byte count even though the list holds 32-bit NSIDs,
+ * so only a quarter of the entries were cleared.  Harmless today only
+ * because the following loop rewrites every entry; the size is now
+ * expressed in bytes.
+ */
+static void
+test_nvme_ctrlr_test_active_ns(void)
+{
+ uint32_t nsid, minor;
+ size_t ns_id_count;
+ struct spdk_nvme_ctrlr ctrlr = {.state = NVME_CTRLR_STATE_READY};
+
+ ctrlr.page_size = 0x1000;
+
+ for (minor = 0; minor <= 2; minor++) {
+ ctrlr.vs.bits.mjr = 1;
+ ctrlr.vs.bits.mnr = minor;
+ ctrlr.vs.bits.ter = 0;
+ ctrlr.num_ns = 1531;
+ nvme_ctrlr_identify_active_ns(&ctrlr);
+
+ for (nsid = 1; nsid <= ctrlr.num_ns; nsid++) {
+ CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, nsid) == true);
+ }
+ /* NSIDs beyond the originally identified range must be inactive */
+ ctrlr.num_ns = 1559;
+ for (; nsid <= ctrlr.num_ns; nsid++) {
+ CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, nsid) == false);
+ }
+ ctrlr.num_ns = 1531;
+ for (nsid = 0; nsid < ctrlr.num_ns; nsid++) {
+ ctrlr.active_ns_list[nsid] = 0;
+ }
+ CU_ASSERT(spdk_nvme_ctrlr_get_first_active_ns(&ctrlr) == 0);
+
+ ctrlr.active_ns_list[0] = 1;
+ CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1) == true);
+ CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 2) == false);
+ nsid = spdk_nvme_ctrlr_get_first_active_ns(&ctrlr);
+ CU_ASSERT(nsid == 1);
+
+ /* sparse list: active NSIDs 1 and 3, nothing in between or after */
+ ctrlr.active_ns_list[1] = 3;
+ CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1) == true);
+ CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 2) == false);
+ CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 3) == true);
+ nsid = spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, nsid);
+ CU_ASSERT(nsid == 3);
+ nsid = spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, nsid);
+ CU_ASSERT(nsid == 0);
+
+ /* clear every 32-bit entry, then mark all namespaces active */
+ memset(ctrlr.active_ns_list, 0, ctrlr.num_ns * sizeof(uint32_t));
+ for (nsid = 0; nsid < ctrlr.num_ns; nsid++) {
+ ctrlr.active_ns_list[nsid] = nsid + 1;
+ }
+
+ ns_id_count = 0;
+ for (nsid = spdk_nvme_ctrlr_get_first_active_ns(&ctrlr);
+ nsid != 0; nsid = spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, nsid)) {
+ CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, nsid) == true);
+ ns_id_count++;
+ }
+ CU_ASSERT(ns_id_count == ctrlr.num_ns);
+
+ nvme_ctrlr_destruct(&ctrlr);
+ }
+}
+
+/* When the mocked identify command completes with INVALID_FIELD,
+ * nvme_ctrlr_identify_active_ns() must fail with -ENXIO. */
+static void
+test_nvme_ctrlr_test_active_ns_error_case(void)
+{
+ int rc;
+ struct spdk_nvme_ctrlr ctrlr = {.state = NVME_CTRLR_STATE_READY};
+
+ ctrlr.page_size = 0x1000;
+ ctrlr.vs.bits.mjr = 1;
+ ctrlr.vs.bits.mnr = 2;
+ ctrlr.vs.bits.ter = 0;
+ ctrlr.num_ns = 2;
+
+ set_status_code = SPDK_NVME_SC_INVALID_FIELD;
+ rc = nvme_ctrlr_identify_active_ns(&ctrlr);
+ CU_ASSERT(rc == -ENXIO);
+ /* restore the default mocked status for subsequent tests */
+ set_status_code = SPDK_NVME_SC_SUCCESS;
+}
+
+/*
+ * Controllers with NVME_QUIRK_DELAY_BEFORE_INIT must wait in
+ * NVME_CTRLR_STATE_INIT until the sleep timeout elapses (driven here with
+ * spdk_delay_us on the mocked tick counter) before proceeding through the
+ * normal enable/ready state machine.
+ */
+static void
+test_nvme_ctrlr_init_delay(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));
+
+ /*
+ * Initial state: CC.EN = 0, CSTS.RDY = 0
+ * init() should set CC.EN = 1.
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ /* Test that the initialization delay works correctly. We only
+ * do the initialization delay on SSDs that require it, so
+ * set that quirk here.
+ */
+ ctrlr.quirks = NVME_QUIRK_DELAY_BEFORE_INIT;
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.state = NVME_CTRLR_STATE_INIT_DELAY;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(ctrlr.sleep_timeout_tsc != 0);
+
+ /* delay 1s, just return as sleep time isn't enough */
+ spdk_delay_us(1 * spdk_get_ticks_hz());
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(ctrlr.sleep_timeout_tsc != 0);
+
+ /* sleep timeout, start to initialize */
+ spdk_delay_us(2 * spdk_get_ticks_hz());
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+
+ /*
+ * Transition to CSTS.RDY = 1.
+ */
+ g_ut_nvme_regs.csts.bits.rdy = 1;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_RESET_ADMIN_QUEUE);
+
+ /*
+ * Transition to READY.
+ */
+ while (ctrlr.state != NVME_CTRLR_STATE_READY) {
+ nvme_ctrlr_process_init(&ctrlr);
+ }
+
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+}
+
+/*
+ * spdk_nvme_ctrlr_set_trid(): rejected with -EPERM unless the controller
+ * is failed; rejected with -EINVAL when trtype or subnqn differ from the
+ * current trid (leaving the trid untouched); accepted when only
+ * traddr/trsvcid change.
+ *
+ * Fix vs. original: the assertion after the trtype-mismatch case used
+ * `=` (assignment) instead of `==`, which always passed and silently
+ * overwrote ctrlr.trid.trtype instead of checking it.
+ */
+static void
+test_spdk_nvme_ctrlr_set_trid(void)
+{
+ struct spdk_nvme_ctrlr ctrlr = {0};
+ struct spdk_nvme_transport_id new_trid = {{0}};
+
+ ctrlr.is_failed = false;
+ ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
+ snprintf(ctrlr.trid.subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
+ snprintf(ctrlr.trid.traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.8");
+ snprintf(ctrlr.trid.trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4420");
+ CU_ASSERT(spdk_nvme_ctrlr_set_trid(&ctrlr, &new_trid) == -EPERM);
+
+ ctrlr.is_failed = true;
+ new_trid.trtype = SPDK_NVME_TRANSPORT_TCP;
+ CU_ASSERT(spdk_nvme_ctrlr_set_trid(&ctrlr, &new_trid) == -EINVAL);
+ /* the rejected update must not have modified the stored trtype */
+ CU_ASSERT(ctrlr.trid.trtype == SPDK_NVME_TRANSPORT_RDMA);
+
+ new_trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
+ snprintf(new_trid.subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode2");
+ CU_ASSERT(spdk_nvme_ctrlr_set_trid(&ctrlr, &new_trid) == -EINVAL);
+ CU_ASSERT(strncmp(ctrlr.trid.subnqn, "nqn.2016-06.io.spdk:cnode1", SPDK_NVMF_NQN_MAX_LEN) == 0);
+
+
+ snprintf(new_trid.subnqn, SPDK_NVMF_NQN_MAX_LEN, "%s", "nqn.2016-06.io.spdk:cnode1");
+ snprintf(new_trid.traddr, SPDK_NVMF_TRADDR_MAX_LEN, "%s", "192.168.100.9");
+ snprintf(new_trid.trsvcid, SPDK_NVMF_TRSVCID_MAX_LEN, "%s", "4421");
+ CU_ASSERT(spdk_nvme_ctrlr_set_trid(&ctrlr, &new_trid) == 0);
+ CU_ASSERT(strncmp(ctrlr.trid.traddr, "192.168.100.9", SPDK_NVMF_TRADDR_MAX_LEN) == 0);
+ CU_ASSERT(strncmp(ctrlr.trid.trsvcid, "4421", SPDK_NVMF_TRSVCID_MAX_LEN) == 0);
+}
+
+/*
+ * ioccsz/icdoff from the NVMe-oF-specific identify data must be applied
+ * only for fabrics transports (RDMA/TCP/FC): ioccsz of 260 units maps to
+ * 4096 bytes.  PCIe and CUSTOM transports must leave both fields zero.
+ */
+static void
+test_nvme_ctrlr_init_set_nvmf_ioccsz(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ /* equivalent of 4096 bytes */
+ ctrlr.cdata.nvmf_specific.ioccsz = 260;
+ ctrlr.cdata.nvmf_specific.icdoff = 1;
+
+ /* Check PCI trtype, */
+ ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
+
+ ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONSTRUCT_NS);
+
+ CU_ASSERT(ctrlr.ioccsz_bytes == 0);
+ CU_ASSERT(ctrlr.icdoff == 0);
+
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /* Check RDMA trtype, */
+ ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
+
+ ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONSTRUCT_NS);
+
+ CU_ASSERT(ctrlr.ioccsz_bytes == 4096);
+ CU_ASSERT(ctrlr.icdoff == 1);
+ ctrlr.ioccsz_bytes = 0;
+ ctrlr.icdoff = 0;
+
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /* Check TCP trtype, */
+ ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_TCP;
+
+ ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONSTRUCT_NS);
+
+ CU_ASSERT(ctrlr.ioccsz_bytes == 4096);
+ CU_ASSERT(ctrlr.icdoff == 1);
+ ctrlr.ioccsz_bytes = 0;
+ ctrlr.icdoff = 0;
+
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /* Check FC trtype, */
+ ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_FC;
+
+ ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONSTRUCT_NS);
+
+ CU_ASSERT(ctrlr.ioccsz_bytes == 4096);
+ CU_ASSERT(ctrlr.icdoff == 1);
+ ctrlr.ioccsz_bytes = 0;
+ ctrlr.icdoff = 0;
+
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /* Check CUSTOM trtype, */
+ ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_CUSTOM;
+
+ ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONSTRUCT_NS);
+
+ CU_ASSERT(ctrlr.ioccsz_bytes == 0);
+ CU_ASSERT(ctrlr.icdoff == 0);
+
+ nvme_ctrlr_destruct(&ctrlr);
+}
+
+/* The Set Features (Number of Queues) completion returns zero-based queue
+ * counts in cdw0; requesting 64 queues while the fake completion grants 32
+ * must cap opts.num_io_queues at 32. */
+static void
+test_nvme_ctrlr_init_set_num_queues(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ ctrlr.state = NVME_CTRLR_STATE_IDENTIFY;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0); /* -> SET_NUM_QUEUES */
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_NUM_QUEUES);
+
+ ctrlr.opts.num_io_queues = 64;
+ /* Num queues is zero-based. So, use 31 to get 32 queues */
+ fake_cpl.cdw0 = 31 + (31 << 16);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0); /* -> CONSTRUCT_NS */
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_CONSTRUCT_NS);
+ CU_ASSERT(ctrlr.opts.num_io_queues == 32);
+ fake_cpl.cdw0 = 0;
+
+ nvme_ctrlr_destruct(&ctrlr);
+}
+
+/*
+ * Keep-alive timeout negotiation: adopt the target's value from Get
+ * Features cdw0; keep the requested value when the target reports
+ * INVALID_FIELD (feature unsupported); enter the ERROR state on any
+ * other failure.
+ */
+static void
+test_nvme_ctrlr_init_set_keep_alive_timeout(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ ctrlr.opts.keep_alive_timeout_ms = 60000;
+ ctrlr.cdata.kas = 1;
+ ctrlr.state = NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT;
+ fake_cpl.cdw0 = 120000;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0); /* -> SET_HOST_ID */
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_HOST_ID);
+ CU_ASSERT(ctrlr.opts.keep_alive_timeout_ms == 120000);
+ fake_cpl.cdw0 = 0;
+
+ /* Target does not support Get Feature "Keep Alive Timer" */
+ ctrlr.opts.keep_alive_timeout_ms = 60000;
+ ctrlr.cdata.kas = 1;
+ ctrlr.state = NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT;
+ set_status_code = SPDK_NVME_SC_INVALID_FIELD;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0); /* -> SET_HOST_ID */
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_SET_HOST_ID);
+ CU_ASSERT(ctrlr.opts.keep_alive_timeout_ms == 60000);
+ set_status_code = SPDK_NVME_SC_SUCCESS;
+
+ /* Target fails Get Feature "Keep Alive Timer" for another reason */
+ ctrlr.opts.keep_alive_timeout_ms = 60000;
+ ctrlr.cdata.kas = 1;
+ ctrlr.state = NVME_CTRLR_STATE_SET_KEEP_ALIVE_TIMEOUT;
+ set_status_code = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0); /* -> ERROR */
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ERROR);
+ set_status_code = SPDK_NVME_SC_SUCCESS;
+
+ nvme_ctrlr_destruct(&ctrlr);
+}
+
+/* CUnit entry point: register every test in the "nvme_ctrlr" suite and
+ * return the number of failed assertions as the process exit code. */
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvme_ctrlr", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_1_rdy_0);
+ CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_1_rdy_1);
+ CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_0_rdy_0);
+ CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_0_rdy_1);
+ CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_0_rdy_0_ams_rr);
+ CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_0_rdy_0_ams_wrr);
+ CU_ADD_TEST(suite, test_nvme_ctrlr_init_en_0_rdy_0_ams_vs);
+ CU_ADD_TEST(suite, test_nvme_ctrlr_init_delay);
+ CU_ADD_TEST(suite, test_alloc_io_qpair_rr_1);
+ CU_ADD_TEST(suite, test_ctrlr_get_default_ctrlr_opts);
+ CU_ADD_TEST(suite, test_ctrlr_get_default_io_qpair_opts);
+ CU_ADD_TEST(suite, test_alloc_io_qpair_wrr_1);
+ CU_ADD_TEST(suite, test_alloc_io_qpair_wrr_2);
+ CU_ADD_TEST(suite, test_spdk_nvme_ctrlr_update_firmware);
+ CU_ADD_TEST(suite, test_nvme_ctrlr_fail);
+ CU_ADD_TEST(suite, test_nvme_ctrlr_construct_intel_support_log_page_list);
+ CU_ADD_TEST(suite, test_nvme_ctrlr_set_supported_features);
+ CU_ADD_TEST(suite, test_spdk_nvme_ctrlr_doorbell_buffer_config);
+#if 0 /* TODO: move to PCIe-specific unit test */
+ CU_ADD_TEST(suite, test_nvme_ctrlr_alloc_cmb);
+#endif
+ CU_ADD_TEST(suite, test_nvme_ctrlr_test_active_ns);
+ CU_ADD_TEST(suite, test_nvme_ctrlr_test_active_ns_error_case);
+ CU_ADD_TEST(suite, test_spdk_nvme_ctrlr_reconnect_io_qpair);
+ CU_ADD_TEST(suite, test_spdk_nvme_ctrlr_set_trid);
+ CU_ADD_TEST(suite, test_nvme_ctrlr_init_set_nvmf_ioccsz);
+ CU_ADD_TEST(suite, test_nvme_ctrlr_init_set_num_queues);
+ CU_ADD_TEST(suite, test_nvme_ctrlr_init_set_keep_alive_timeout);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/.gitignore
new file mode 100644
index 000000000..1568b4763
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/.gitignore
@@ -0,0 +1 @@
+nvme_ctrlr_cmd_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/Makefile
new file mode 100644
index 000000000..5c647dd31
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+# Unit-test makefile: point TEST_FILE at the source under test and let the
+# shared spdk.unittest.mk rules build and link it.
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_ctrlr_cmd_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/nvme_ctrlr_cmd_ut.c b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/nvme_ctrlr_cmd_ut.c
new file mode 100644
index 000000000..581d6134c
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/nvme_ctrlr_cmd_ut.c
@@ -0,0 +1,751 @@
+
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "nvme/nvme_ctrlr_cmd.c"
+
+#define CTRLR_CDATA_ELPE 5
+
+pid_t g_spdk_nvme_pid;
+
+struct nvme_request g_req;
+
+uint32_t error_num_entries;
+uint32_t health_log_nsid = 1;
+uint8_t feature = 1;
+uint32_t feature_cdw11 = 1;
+uint32_t feature_cdw12 = 1;
+uint8_t get_feature = 1;
+uint32_t get_feature_cdw11 = 1;
+uint32_t fw_img_size = 1024;
+uint32_t fw_img_offset = 0;
+uint16_t abort_cid = 1;
+uint16_t abort_sqid = 1;
+uint32_t namespace_management_nsid = 1;
+uint64_t PRP_ENTRY_1 = 4096;
+uint64_t PRP_ENTRY_2 = 4096;
+uint32_t format_nvme_nsid = 1;
+uint32_t sanitize_nvme_nsid = 1;
+uint32_t expected_host_id_size = 0xFF;
+
+uint32_t expected_feature_ns = 2;
+uint32_t expected_feature_cdw10 = SPDK_NVME_FEAT_LBA_RANGE_TYPE;
+uint32_t expected_feature_cdw11 = 1;
+uint32_t expected_feature_cdw12 = 1;
+
+typedef void (*verify_request_fn_t)(struct nvme_request *req);
+verify_request_fn_t verify_fn;
+
+static void verify_firmware_log_page(struct nvme_request *req)
+{
+ uint32_t temp_cdw10;
+
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE);
+ CU_ASSERT(req->cmd.nsid == SPDK_NVME_GLOBAL_NS_TAG);
+
+ temp_cdw10 = ((sizeof(struct spdk_nvme_firmware_page) / sizeof(uint32_t) - 1) << 16) |
+ SPDK_NVME_LOG_FIRMWARE_SLOT;
+ CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
+}
+
+static void verify_health_log_page(struct nvme_request *req)
+{
+ uint32_t temp_cdw10;
+
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE);
+ CU_ASSERT(req->cmd.nsid == health_log_nsid);
+
+ temp_cdw10 = ((sizeof(struct spdk_nvme_health_information_page) / sizeof(uint32_t) - 1) << 16) |
+ SPDK_NVME_LOG_HEALTH_INFORMATION;
+ CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
+}
+
+static void verify_error_log_page(struct nvme_request *req)
+{
+ uint32_t temp_cdw10;
+
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE);
+ CU_ASSERT(req->cmd.nsid == SPDK_NVME_GLOBAL_NS_TAG);
+
+ temp_cdw10 = (((sizeof(struct spdk_nvme_error_information_entry) * error_num_entries) /
+ sizeof(uint32_t) - 1) << 16) | SPDK_NVME_LOG_ERROR;
+ CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
+}
+
+static void verify_set_feature_cmd(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_SET_FEATURES);
+ CU_ASSERT(req->cmd.cdw10 == feature);
+ CU_ASSERT(req->cmd.cdw11 == feature_cdw11);
+ CU_ASSERT(req->cmd.cdw12 == feature_cdw12);
+}
+
+static void verify_set_feature_ns_cmd(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_SET_FEATURES);
+ CU_ASSERT(req->cmd.cdw10 == expected_feature_cdw10);
+ CU_ASSERT(req->cmd.cdw11 == expected_feature_cdw11);
+ CU_ASSERT(req->cmd.cdw12 == expected_feature_cdw12);
+ CU_ASSERT(req->cmd.nsid == expected_feature_ns);
+}
+
+static void verify_get_feature_cmd(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_FEATURES);
+ CU_ASSERT(req->cmd.cdw10 == get_feature);
+ CU_ASSERT(req->cmd.cdw11 == get_feature_cdw11);
+}
+
+static void verify_get_feature_ns_cmd(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_FEATURES);
+ CU_ASSERT(req->cmd.cdw10 == expected_feature_cdw10);
+ CU_ASSERT(req->cmd.cdw11 == expected_feature_cdw11);
+ CU_ASSERT(req->cmd.nsid == expected_feature_ns);
+}
+
+static void verify_abort_cmd(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_ABORT);
+ CU_ASSERT(req->cmd.cdw10 == (((uint32_t)abort_cid << 16) | abort_sqid));
+}
+
+static void verify_io_cmd_raw_no_payload_build(struct nvme_request *req)
+{
+ struct spdk_nvme_cmd command = {};
+ struct nvme_payload payload = {};
+
+ CU_ASSERT(memcmp(&req->cmd, &command, sizeof(req->cmd)) == 0);
+ CU_ASSERT(memcmp(&req->payload, &payload, sizeof(req->payload)) == 0);
+}
+
+static void verify_io_raw_cmd(struct nvme_request *req)
+{
+ struct spdk_nvme_cmd command = {};
+
+ CU_ASSERT(memcmp(&req->cmd, &command, sizeof(req->cmd)) == 0);
+}
+
+static void verify_io_raw_cmd_with_md(struct nvme_request *req)
+{
+ struct spdk_nvme_cmd command = {};
+
+ CU_ASSERT(memcmp(&req->cmd, &command, sizeof(req->cmd)) == 0);
+}
+
+static void verify_set_host_id_cmd(struct nvme_request *req)
+{
+ switch (expected_host_id_size) {
+ case 8:
+ CU_ASSERT(req->cmd.cdw10 == SPDK_NVME_FEAT_HOST_IDENTIFIER);
+ CU_ASSERT(req->cmd.cdw11 == 0);
+ CU_ASSERT(req->cmd.cdw12 == 0);
+ break;
+ case 16:
+ CU_ASSERT(req->cmd.cdw10 == SPDK_NVME_FEAT_HOST_IDENTIFIER);
+ CU_ASSERT(req->cmd.cdw11 == 1);
+ CU_ASSERT(req->cmd.cdw12 == 0);
+ break;
+ default:
+ CU_ASSERT(0);
+ }
+}
+
+static void verify_intel_smart_log_page(struct nvme_request *req)
+{
+ uint32_t temp_cdw10;
+
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE);
+ CU_ASSERT(req->cmd.nsid == health_log_nsid);
+
+ temp_cdw10 = ((sizeof(struct spdk_nvme_intel_smart_information_page) /
+ sizeof(uint32_t) - 1) << 16) |
+ SPDK_NVME_INTEL_LOG_SMART;
+ CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
+}
+
+static void verify_intel_temperature_log_page(struct nvme_request *req)
+{
+ uint32_t temp_cdw10;
+
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE);
+
+ temp_cdw10 = ((sizeof(struct spdk_nvme_intel_temperature_page) / sizeof(uint32_t) - 1) << 16) |
+ SPDK_NVME_INTEL_LOG_TEMPERATURE;
+ CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
+}
+
+static void verify_intel_read_latency_log_page(struct nvme_request *req)
+{
+ uint32_t temp_cdw10;
+
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE);
+
+ temp_cdw10 = ((sizeof(struct spdk_nvme_intel_rw_latency_page) / sizeof(uint32_t) - 1) << 16) |
+ SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY;
+ CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
+}
+
+static void verify_intel_write_latency_log_page(struct nvme_request *req)
+{
+ uint32_t temp_cdw10;
+
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE);
+
+ temp_cdw10 = ((sizeof(struct spdk_nvme_intel_rw_latency_page) / sizeof(uint32_t) - 1) << 16) |
+ SPDK_NVME_INTEL_LOG_WRITE_CMD_LATENCY;
+ CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
+}
+
+static void verify_intel_get_log_page_directory(struct nvme_request *req)
+{
+ uint32_t temp_cdw10;
+
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE);
+
+ temp_cdw10 = ((sizeof(struct spdk_nvme_intel_log_page_directory) / sizeof(uint32_t) - 1) << 16) |
+ SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY;
+ CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
+}
+
+static void verify_intel_marketing_description_log_page(struct nvme_request *req)
+{
+ uint32_t temp_cdw10;
+
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE);
+
+ temp_cdw10 = ((sizeof(struct spdk_nvme_intel_marketing_description_page) / sizeof(
+ uint32_t) - 1) << 16) |
+ SPDK_NVME_INTEL_MARKETING_DESCRIPTION;
+ CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
+}
+
+static void verify_namespace_attach(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_NS_ATTACHMENT);
+ CU_ASSERT(req->cmd.cdw10 == SPDK_NVME_NS_CTRLR_ATTACH);
+ CU_ASSERT(req->cmd.nsid == namespace_management_nsid);
+}
+
+static void verify_namespace_detach(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_NS_ATTACHMENT);
+ CU_ASSERT(req->cmd.cdw10 == SPDK_NVME_NS_CTRLR_DETACH);
+ CU_ASSERT(req->cmd.nsid == namespace_management_nsid);
+}
+
+static void verify_namespace_create(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_NS_MANAGEMENT);
+ CU_ASSERT(req->cmd.cdw10 == SPDK_NVME_NS_MANAGEMENT_CREATE);
+ CU_ASSERT(req->cmd.nsid == 0);
+}
+
+static void verify_namespace_delete(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_NS_MANAGEMENT);
+ CU_ASSERT(req->cmd.cdw10 == SPDK_NVME_NS_MANAGEMENT_DELETE);
+ CU_ASSERT(req->cmd.nsid == namespace_management_nsid);
+}
+
+static void verify_doorbell_buffer_config(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_DOORBELL_BUFFER_CONFIG);
+ CU_ASSERT(req->cmd.dptr.prp.prp1 == PRP_ENTRY_1);
+ CU_ASSERT(req->cmd.dptr.prp.prp2 == PRP_ENTRY_2);
+}
+
+static void verify_format_nvme(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_FORMAT_NVM);
+ CU_ASSERT(req->cmd.cdw10 == 0);
+ CU_ASSERT(req->cmd.nsid == format_nvme_nsid);
+}
+
+static void verify_fw_commit(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_FIRMWARE_COMMIT);
+ CU_ASSERT(req->cmd.cdw10 == 0x09);
+}
+
+static void verify_fw_image_download(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD);
+ CU_ASSERT(req->cmd.cdw10 == (fw_img_size >> 2) - 1);
+ CU_ASSERT(req->cmd.cdw11 == fw_img_offset >> 2);
+}
+
+static void verify_nvme_sanitize(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_SANITIZE);
+ CU_ASSERT(req->cmd.cdw10 == 0x309);
+ CU_ASSERT(req->cmd.cdw11 == 0);
+ CU_ASSERT(req->cmd.nsid == sanitize_nvme_nsid);
+}
+
+struct nvme_request *
+nvme_allocate_request_user_copy(struct spdk_nvme_qpair *qpair, void *buffer, uint32_t payload_size,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg, bool host_to_controller)
+{
+ /* For the unit test, we don't actually need to copy the buffer */
+ return nvme_allocate_request_contig(qpair, buffer, payload_size, cb_fn, cb_arg);
+}
+
+int
+nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
+{
+ verify_fn(req);
+ /* stop analyzer from thinking stack variable addresses are stored in a global */
+ memset(req, 0, sizeof(*req));
+
+ return 0;
+}
+
+int
+nvme_ctrlr_submit_admin_request(struct spdk_nvme_ctrlr *ctrlr, struct nvme_request *req)
+{
+ verify_fn(req);
+ /* stop analyzer from thinking stack variable addresses are stored in a global */
+ memset(req, 0, sizeof(*req));
+
+ return 0;
+}
+
+#define DECLARE_AND_CONSTRUCT_CTRLR() \
+ struct spdk_nvme_ctrlr ctrlr = {}; \
+ struct spdk_nvme_qpair adminq = {}; \
+ struct nvme_request req; \
+ \
+ STAILQ_INIT(&adminq.free_req); \
+ STAILQ_INSERT_HEAD(&adminq.free_req, &req, stailq); \
+ ctrlr.adminq = &adminq;
+
+static void
+test_firmware_get_log_page(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_firmware_page payload = {};
+
+ verify_fn = verify_firmware_log_page;
+
+ spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_LOG_FIRMWARE_SLOT, SPDK_NVME_GLOBAL_NS_TAG,
+ &payload,
+ sizeof(payload), 0, NULL, NULL);
+}
+
+static void
+test_health_get_log_page(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_health_information_page payload = {};
+
+ verify_fn = verify_health_log_page;
+
+ spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_LOG_HEALTH_INFORMATION, health_log_nsid,
+ &payload,
+ sizeof(payload), 0, NULL, NULL);
+}
+
+static void
+test_error_get_log_page(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_error_information_entry payload = {};
+
+ ctrlr.cdata.elpe = CTRLR_CDATA_ELPE;
+
+ verify_fn = verify_error_log_page;
+
+ /* valid page */
+ error_num_entries = 1;
+ spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_LOG_ERROR, SPDK_NVME_GLOBAL_NS_TAG, &payload,
+ sizeof(payload), 0, NULL, NULL);
+}
+
+static void test_intel_smart_get_log_page(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_intel_smart_information_page payload = {};
+
+ verify_fn = verify_intel_smart_log_page;
+
+ spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_INTEL_LOG_SMART, health_log_nsid, &payload,
+ sizeof(payload), 0, NULL, NULL);
+}
+
+static void test_intel_temperature_get_log_page(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_intel_temperature_page payload = {};
+
+ verify_fn = verify_intel_temperature_log_page;
+
+ spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE, SPDK_NVME_GLOBAL_NS_TAG,
+ &payload, sizeof(payload), 0, NULL, NULL);
+}
+
+static void test_intel_read_latency_get_log_page(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_intel_rw_latency_page payload = {};
+
+ verify_fn = verify_intel_read_latency_log_page;
+
+ spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY,
+ SPDK_NVME_GLOBAL_NS_TAG,
+ &payload, sizeof(payload), 0, NULL, NULL);
+}
+
+static void test_intel_write_latency_get_log_page(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_intel_rw_latency_page payload = {};
+
+ verify_fn = verify_intel_write_latency_log_page;
+
+ spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_INTEL_LOG_WRITE_CMD_LATENCY,
+ SPDK_NVME_GLOBAL_NS_TAG,
+ &payload, sizeof(payload), 0, NULL, NULL);
+}
+
+static void test_intel_get_log_page_directory(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_intel_log_page_directory payload = {};
+
+ verify_fn = verify_intel_get_log_page_directory;
+
+ spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY,
+ SPDK_NVME_GLOBAL_NS_TAG,
+ &payload, sizeof(payload), 0, NULL, NULL);
+}
+
+static void test_intel_marketing_description_get_log_page(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_intel_marketing_description_page payload = {};
+
+ verify_fn = verify_intel_marketing_description_log_page;
+
+ spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_INTEL_MARKETING_DESCRIPTION,
+ SPDK_NVME_GLOBAL_NS_TAG,
+ &payload, sizeof(payload), 0, NULL, NULL);
+}
+
+static void test_generic_get_log_pages(void)
+{
+ test_error_get_log_page();
+ test_health_get_log_page();
+ test_firmware_get_log_page();
+}
+
+static void test_intel_get_log_pages(void)
+{
+ test_intel_get_log_page_directory();
+ test_intel_smart_get_log_page();
+ test_intel_temperature_get_log_page();
+ test_intel_read_latency_get_log_page();
+ test_intel_write_latency_get_log_page();
+ test_intel_marketing_description_get_log_page();
+}
+
+static void
+test_set_feature_cmd(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ verify_fn = verify_set_feature_cmd;
+
+ spdk_nvme_ctrlr_cmd_set_feature(&ctrlr, feature, feature_cdw11, feature_cdw12, NULL, 0, NULL, NULL);
+}
+
+static void
+test_get_feature_ns_cmd(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ verify_fn = verify_get_feature_ns_cmd;
+
+ spdk_nvme_ctrlr_cmd_get_feature_ns(&ctrlr, expected_feature_cdw10,
+ expected_feature_cdw11, NULL, 0,
+ NULL, NULL, expected_feature_ns);
+}
+
+static void
+test_set_feature_ns_cmd(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ verify_fn = verify_set_feature_ns_cmd;
+
+ spdk_nvme_ctrlr_cmd_set_feature_ns(&ctrlr, expected_feature_cdw10,
+ expected_feature_cdw11, expected_feature_cdw12,
+ NULL, 0, NULL, NULL, expected_feature_ns);
+}
+
+static void
+test_get_feature_cmd(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ verify_fn = verify_get_feature_cmd;
+
+ spdk_nvme_ctrlr_cmd_get_feature(&ctrlr, get_feature, get_feature_cdw11, NULL, 0, NULL, NULL);
+}
+
+static void
+test_abort_cmd(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_qpair qpair = {};
+
+ STAILQ_INIT(&ctrlr.queued_aborts);
+
+ verify_fn = verify_abort_cmd;
+
+ qpair.id = abort_sqid;
+ spdk_nvme_ctrlr_cmd_abort(&ctrlr, &qpair, abort_cid, NULL, NULL);
+}
+
+static void
+test_io_cmd_raw_no_payload_build(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_qpair qpair = {};
+ struct spdk_nvme_cmd cmd = {};
+
+ verify_fn = verify_io_cmd_raw_no_payload_build;
+
+ spdk_nvme_ctrlr_io_cmd_raw_no_payload_build(&ctrlr, &qpair, &cmd, NULL, NULL);
+}
+
+static void
+test_io_raw_cmd(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_qpair qpair = {};
+ struct spdk_nvme_cmd cmd = {};
+
+ verify_fn = verify_io_raw_cmd;
+
+ spdk_nvme_ctrlr_cmd_io_raw(&ctrlr, &qpair, &cmd, NULL, 1, NULL, NULL);
+}
+
+static void
+test_io_raw_cmd_with_md(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_qpair qpair = {};
+ struct spdk_nvme_cmd cmd = {};
+
+ verify_fn = verify_io_raw_cmd_with_md;
+
+ spdk_nvme_ctrlr_cmd_io_raw_with_md(&ctrlr, &qpair, &cmd, NULL, 1, NULL, NULL, NULL);
+}
+
+static int
+test_set_host_id_by_case(uint32_t host_id_size)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ int rc = 0;
+
+ expected_host_id_size = host_id_size;
+ verify_fn = verify_set_host_id_cmd;
+
+ rc = nvme_ctrlr_cmd_set_host_id(&ctrlr, NULL, expected_host_id_size, NULL, NULL);
+
+ return rc;
+}
+
+static void
+test_set_host_id_cmds(void)
+{
+ int rc = 0;
+
+ rc = test_set_host_id_by_case(8);
+ CU_ASSERT(rc == 0);
+ rc = test_set_host_id_by_case(16);
+ CU_ASSERT(rc == 0);
+ rc = test_set_host_id_by_case(1024);
+ CU_ASSERT(rc == -EINVAL);
+}
+
+static void
+test_get_log_pages(void)
+{
+ test_generic_get_log_pages();
+ test_intel_get_log_pages();
+}
+
+static void
+test_namespace_attach(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_ctrlr_list payload = {};
+
+ verify_fn = verify_namespace_attach;
+
+ nvme_ctrlr_cmd_attach_ns(&ctrlr, namespace_management_nsid, &payload, NULL, NULL);
+}
+
+static void
+test_namespace_detach(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_ctrlr_list payload = {};
+
+ verify_fn = verify_namespace_detach;
+
+ nvme_ctrlr_cmd_detach_ns(&ctrlr, namespace_management_nsid, &payload, NULL, NULL);
+}
+
+static void
+test_namespace_create(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_ns_data payload = {};
+
+ verify_fn = verify_namespace_create;
+ nvme_ctrlr_cmd_create_ns(&ctrlr, &payload, NULL, NULL);
+}
+
+static void
+test_namespace_delete(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ verify_fn = verify_namespace_delete;
+ nvme_ctrlr_cmd_delete_ns(&ctrlr, namespace_management_nsid, NULL, NULL);
+}
+
+static void
+test_doorbell_buffer_config(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ verify_fn = verify_doorbell_buffer_config;
+
+ nvme_ctrlr_cmd_doorbell_buffer_config(&ctrlr, PRP_ENTRY_1, PRP_ENTRY_2, NULL, NULL);
+}
+
+static void
+test_format_nvme(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_format format = {};
+
+ verify_fn = verify_format_nvme;
+
+ nvme_ctrlr_cmd_format(&ctrlr, format_nvme_nsid, &format, NULL, NULL);
+}
+
+static void
+test_fw_commit(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_fw_commit fw_commit = {};
+
+ fw_commit.ca = SPDK_NVME_FW_COMMIT_REPLACE_AND_ENABLE_IMG;
+ fw_commit.fs = 1;
+
+ verify_fn = verify_fw_commit;
+
+ nvme_ctrlr_cmd_fw_commit(&ctrlr, &fw_commit, NULL, NULL);
+}
+
+static void
+test_fw_image_download(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ verify_fn = verify_fw_image_download;
+
+ nvme_ctrlr_cmd_fw_image_download(&ctrlr, fw_img_size, fw_img_offset, NULL,
+ NULL, NULL);
+}
+
+static void
+test_sanitize(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_sanitize sanitize = {};
+
+ sanitize.sanact = 1;
+ sanitize.ause = 1;
+ sanitize.oipbp = 1;
+ sanitize.ndas = 1;
+
+ verify_fn = verify_nvme_sanitize;
+
+ nvme_ctrlr_cmd_sanitize(&ctrlr, sanitize_nvme_nsid, &sanitize, 0, NULL, NULL);
+
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvme_ctrlr_cmd", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_get_log_pages);
+ CU_ADD_TEST(suite, test_set_feature_cmd);
+ CU_ADD_TEST(suite, test_set_feature_ns_cmd);
+ CU_ADD_TEST(suite, test_get_feature_cmd);
+ CU_ADD_TEST(suite, test_get_feature_ns_cmd);
+ CU_ADD_TEST(suite, test_abort_cmd);
+ CU_ADD_TEST(suite, test_set_host_id_cmds);
+ CU_ADD_TEST(suite, test_io_cmd_raw_no_payload_build);
+ CU_ADD_TEST(suite, test_io_raw_cmd);
+ CU_ADD_TEST(suite, test_io_raw_cmd_with_md);
+ CU_ADD_TEST(suite, test_namespace_attach);
+ CU_ADD_TEST(suite, test_namespace_detach);
+ CU_ADD_TEST(suite, test_namespace_create);
+ CU_ADD_TEST(suite, test_namespace_delete);
+ CU_ADD_TEST(suite, test_doorbell_buffer_config);
+ CU_ADD_TEST(suite, test_format_nvme);
+ CU_ADD_TEST(suite, test_fw_commit);
+ CU_ADD_TEST(suite, test_fw_image_download);
+ CU_ADD_TEST(suite, test_sanitize);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/.gitignore
new file mode 100644
index 000000000..2813105d4
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/.gitignore
@@ -0,0 +1 @@
+nvme_ctrlr_ocssd_cmd_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/Makefile
new file mode 100644
index 000000000..9446b8d53
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_ctrlr_ocssd_cmd_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/nvme_ctrlr_ocssd_cmd_ut.c b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/nvme_ctrlr_ocssd_cmd_ut.c
new file mode 100644
index 000000000..69de8c5b0
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/nvme_ctrlr_ocssd_cmd_ut.c
@@ -0,0 +1,106 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "nvme/nvme_ctrlr_ocssd_cmd.c"
+
+#define DECLARE_AND_CONSTRUCT_CTRLR() \
+ struct spdk_nvme_ctrlr ctrlr = {}; \
+ struct spdk_nvme_qpair adminq = {}; \
+ struct nvme_request req; \
+ \
+ STAILQ_INIT(&adminq.free_req); \
+ STAILQ_INSERT_HEAD(&adminq.free_req, &req, stailq); \
+ ctrlr.adminq = &adminq;
+
+pid_t g_spdk_nvme_pid;
+struct nvme_request g_req;
+typedef void (*verify_request_fn_t)(struct nvme_request *req);
+verify_request_fn_t verify_fn;
+
+static const uint32_t expected_geometry_ns = 1;
+
+int
+nvme_ctrlr_submit_admin_request(struct spdk_nvme_ctrlr *ctrlr, struct nvme_request *req)
+{
+ verify_fn(req);
+ memset(req, 0, sizeof(*req));
+ return 0;
+}
+
+struct nvme_request *
+nvme_allocate_request_user_copy(struct spdk_nvme_qpair *qpair, void *buffer, uint32_t payload_size,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg, bool host_to_controller)
+{
+ /* For the unit test, we don't actually need to copy the buffer */
+ return nvme_allocate_request_contig(qpair, buffer, payload_size, cb_fn, cb_arg);
+}
+
+static void verify_geometry_cmd(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_OCSSD_OPC_GEOMETRY);
+ CU_ASSERT(req->cmd.nsid == expected_geometry_ns);
+}
+
+static void
+test_geometry_cmd(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ struct spdk_ocssd_geometry_data geo;
+
+ verify_fn = verify_geometry_cmd;
+
+ spdk_nvme_ocssd_ctrlr_cmd_geometry(&ctrlr, expected_geometry_ns, &geo,
+ sizeof(geo), NULL, NULL);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvme_ctrlr_cmd", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_geometry_cmd);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ns.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_ns.c/.gitignore
new file mode 100644
index 000000000..ada0ec86d
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ns.c/.gitignore
@@ -0,0 +1 @@
+nvme_ns_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ns.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_ns.c/Makefile
new file mode 100644
index 000000000..add85ee9f
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ns.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_ns_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ns.c/nvme_ns_ut.c b/src/spdk/test/unit/lib/nvme/nvme_ns.c/nvme_ns_ut.c
new file mode 100644
index 000000000..22c59e06c
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ns.c/nvme_ns_ut.c
@@ -0,0 +1,153 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "spdk/env.h"
+
+#include "nvme/nvme_ns.c"
+
+#include "common/lib/test_env.c"
+
+SPDK_LOG_REGISTER_COMPONENT("nvme", SPDK_LOG_NVME)
+
+DEFINE_STUB(nvme_wait_for_completion_robust_lock, int,
+	    (struct spdk_nvme_qpair *qpair,
+	     struct nvme_completion_poll_status *status,
+	     pthread_mutex_t *robust_mutex), 0); /* stub: polling always "succeeds" immediately */
+
+int
+nvme_ctrlr_cmd_identify(struct spdk_nvme_ctrlr *ctrlr, uint8_t cns, uint16_t cntid, uint32_t nsid,
+			void *payload, size_t payload_size,
+			spdk_nvme_cmd_cb cb_fn, void *cb_arg) /* stub replacing the real identify submission */
+{
+	return -1; /* always fail, so ns code under test exercises its identify error path */
+}
+
+void
+nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl) /* stub: completion result is ignored */
+{
+}
+
+int32_t
+spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions) /* stub poller */
+{
+	return -1; /* report failure; no completions are ever produced in this UT */
+}
+
+static void
+test_nvme_ns_construct(void) /* verify nvme_ns_construct() records the namespace ID */
+{
+	struct spdk_nvme_ns ns = {};
+	uint32_t id = 1;
+	struct spdk_nvme_ctrlr ctrlr = {};
+
+	nvme_ns_construct(&ns, id, &ctrlr);
+	CU_ASSERT(ns.id == 1); /* the id passed in must be stored on the ns object */
+}
+
+static void
+test_nvme_ns_uuid(void) /* exercise spdk_nvme_ns_get_uuid() against hand-built NS ID descriptor lists */
+{
+	struct spdk_nvme_ns ns = {};
+	const struct spdk_uuid *uuid;
+	struct spdk_uuid expected_uuid;
+
+	memset(&expected_uuid, 0xA5, sizeof(expected_uuid)); /* recognizable pattern to match against */
+
+	/* Empty list - no UUID should be found */
+	memset(ns.id_desc_list, 0, sizeof(ns.id_desc_list));
+	uuid = spdk_nvme_ns_get_uuid(&ns);
+	CU_ASSERT(uuid == NULL);
+
+	/* NGUID only (no UUID in list) */
+	memset(ns.id_desc_list, 0, sizeof(ns.id_desc_list));
+	ns.id_desc_list[0] = 0x02; /* NIDT == NGUID */
+	ns.id_desc_list[1] = 0x10; /* NIDL */
+	memset(&ns.id_desc_list[4], 0xCC, 0x10); /* descriptor payload starts at byte 4 (4-byte header) */
+	uuid = spdk_nvme_ns_get_uuid(&ns);
+	CU_ASSERT(uuid == NULL); /* NGUID is not a UUID; lookup must not match it */
+
+	/* Just UUID in the list */
+	memset(ns.id_desc_list, 0, sizeof(ns.id_desc_list));
+	ns.id_desc_list[0] = 0x03; /* NIDT == UUID */
+	ns.id_desc_list[1] = 0x10; /* NIDL */
+	memcpy(&ns.id_desc_list[4], &expected_uuid, sizeof(expected_uuid));
+	uuid = spdk_nvme_ns_get_uuid(&ns);
+	SPDK_CU_ASSERT_FATAL(uuid != NULL);
+	CU_ASSERT(memcmp(uuid, &expected_uuid, sizeof(*uuid)) == 0);
+
+	/* UUID followed by NGUID */
+	memset(ns.id_desc_list, 0, sizeof(ns.id_desc_list));
+	ns.id_desc_list[0] = 0x03; /* NIDT == UUID */
+	ns.id_desc_list[1] = 0x10; /* NIDL */
+	memcpy(&ns.id_desc_list[4], &expected_uuid, sizeof(expected_uuid));
+	ns.id_desc_list[20] = 0x02; /* NIDT == NGUID; second descriptor at offset 4 + 0x10 = 20 */
+	ns.id_desc_list[21] = 0x10; /* NIDL */
+	memset(&ns.id_desc_list[24], 0xCC, 0x10);
+	uuid = spdk_nvme_ns_get_uuid(&ns);
+	SPDK_CU_ASSERT_FATAL(uuid != NULL);
+	CU_ASSERT(memcmp(uuid, &expected_uuid, sizeof(*uuid)) == 0);
+
+	/* NGUID followed by UUID */
+	memset(ns.id_desc_list, 0, sizeof(ns.id_desc_list));
+	ns.id_desc_list[0] = 0x02; /* NIDT == NGUID */
+	ns.id_desc_list[1] = 0x10; /* NIDL */
+	memset(&ns.id_desc_list[4], 0xCC, 0x10);
+	ns.id_desc_list[20] = 0x03; /* NIDT = UUID */
+	ns.id_desc_list[21] = 0x10; /* NIDL */
+	memcpy(&ns.id_desc_list[24], &expected_uuid, sizeof(expected_uuid));
+	uuid = spdk_nvme_ns_get_uuid(&ns);
+	SPDK_CU_ASSERT_FATAL(uuid != NULL); /* must skip the NGUID and still find the UUID */
+	CU_ASSERT(memcmp(uuid, &expected_uuid, sizeof(*uuid)) == 0);
+}
+
+int main(int argc, char **argv) /* CUnit driver: registers and runs the nvme_ns tests */
+{
+	CU_pSuite suite = NULL;
+	unsigned int num_failures;
+
+	CU_set_error_action(CUEA_ABORT); /* abort on framework errors rather than continue */
+	CU_initialize_registry();
+
+	suite = CU_add_suite("nvme", NULL, NULL); /* no suite-level setup/teardown needed */
+
+	CU_ADD_TEST(suite, test_nvme_ns_construct);
+	CU_ADD_TEST(suite, test_nvme_ns_uuid);
+
+	CU_basic_set_mode(CU_BRM_VERBOSE);
+	CU_basic_run_tests();
+	num_failures = CU_get_number_of_failures();
+	CU_cleanup_registry();
+	return num_failures; /* non-zero exit when any assertion failed */
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/.gitignore
new file mode 100644
index 000000000..5583ec23e
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/.gitignore
@@ -0,0 +1 @@
+nvme_ns_cmd_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/Makefile
new file mode 100644
index 000000000..ff451d72a
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_ns_cmd_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/nvme_ns_cmd_ut.c b/src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/nvme_ns_cmd_ut.c
new file mode 100644
index 000000000..fe0014f56
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/nvme_ns_cmd_ut.c
@@ -0,0 +1,1739 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "nvme/nvme_ns_cmd.c"
+#include "nvme/nvme.c"
+
+#include "common/lib/test_env.c"
+
+static struct nvme_driver _g_nvme_driver = {
+	.lock = PTHREAD_MUTEX_INITIALIZER,
+}; /* minimal driver singleton so nvme.c code compiled into this UT can reference it */
+
+static struct nvme_request *g_request = NULL; /* captures the last request "submitted" (see nvme_qpair_submit_request stub) */
+
+int
+spdk_pci_enumerate(struct spdk_pci_driver *driver, spdk_pci_enum_cb enum_cb, void *enum_ctx) /* stub: no PCI devices in UT */
+{
+	return -1;
+}
+
+static void nvme_request_reset_sgl(void *cb_arg, uint32_t sgl_offset) /* SGL reset callback: nothing to rewind */
+{
+}
+
+static int nvme_request_next_sge(void *cb_arg, void **address, uint32_t *length) /* SGL iterator: one synthetic element */
+{
+	uint32_t *lba_count = cb_arg; /* callers pass &sge_length (total bytes) as cb_arg */
+
+	/*
+	 * We need to set address to something here, since the SGL splitting code will
+	 * use it to determine PRP compatibility. Just use a rather arbitrary address
+	 * for now - these tests will not actually cause data to be read from or written
+	 * to this address.
+	 */
+	*address = (void *)(uintptr_t)0x10000000;
+	*length = *lba_count;
+	return 0;
+}
+
+bool
+spdk_nvme_transport_available_by_name(const char *transport_name) /* stub: every transport name "exists" */
+{
+	return true;
+}
+
+struct spdk_nvme_ctrlr *nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
+		const struct spdk_nvme_ctrlr_opts *opts,
+		void *devhandle) /* stub: controller construction always fails */
+{
+	return NULL;
+}
+
+void
+nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr) /* stub: no-op teardown */
+{
+}
+
+int
+nvme_ctrlr_add_process(struct spdk_nvme_ctrlr *ctrlr, void *devhandle) /* stub: always succeeds */
+{
+	return 0;
+}
+
+int
+nvme_ctrlr_process_init(struct spdk_nvme_ctrlr *ctrlr) /* stub: init always succeeds */
+{
+	return 0;
+}
+
+void
+nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove) /* stub: ignore failure notifications */
+{
+}
+
+struct spdk_pci_addr
+spdk_pci_device_get_addr(struct spdk_pci_device *pci_dev) /* stub: all-zero PCI address */
+{
+	struct spdk_pci_addr pci_addr;
+
+	memset(&pci_addr, 0, sizeof(pci_addr));
+	return pci_addr;
+}
+
+struct spdk_pci_id
+spdk_pci_device_get_id(struct spdk_pci_device *pci_dev) /* stub: all-ones PCI vendor/device ID */
+{
+	struct spdk_pci_id pci_id;
+
+	memset(&pci_id, 0xFF, sizeof(pci_id));
+
+	return pci_id;
+}
+
+void
+spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size) /* stub: zeroed defaults */
+{
+	memset(opts, 0, sizeof(*opts));
+}
+
+uint32_t
+spdk_nvme_ns_get_sector_size(struct spdk_nvme_ns *ns) /* pass-through accessor used by the splitting code */
+{
+	return ns->sector_size;
+}
+
+uint32_t
+spdk_nvme_ns_get_max_io_xfer_size(struct spdk_nvme_ns *ns) /* max transfer comes from the parent controller */
+{
+	return ns->ctrlr->max_xfer_size;
+}
+
+int
+nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req) /* capture instead of submit */
+{
+	g_request = req; /* tests inspect the captured request (and its children) after each cmd API call */
+
+	return 0;
+}
+
+void
+nvme_ctrlr_proc_get_ref(struct spdk_nvme_ctrlr *ctrlr) /* stub: refcounting is irrelevant here */
+{
+	return;
+}
+
+void
+nvme_ctrlr_proc_put_ref(struct spdk_nvme_ctrlr *ctrlr) /* stub: refcounting is irrelevant here */
+{
+	return;
+}
+
+int
+nvme_ctrlr_get_ref_count(struct spdk_nvme_ctrlr *ctrlr) /* stub: always report zero references */
+{
+	return 0;
+}
+
+int
+nvme_transport_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx,
+			  bool direct_connect) /* stub: scanning discovers nothing */
+{
+	return 0;
+}
+
+static void
+prepare_for_test(struct spdk_nvme_ns *ns, struct spdk_nvme_ctrlr *ctrlr,
+		 struct spdk_nvme_qpair *qpair,
+		 uint32_t sector_size, uint32_t md_size, uint32_t max_xfer_size,
+		 uint32_t stripe_size, bool extended_lba) /* build a fake ns/ctrlr/qpair trio for one test case */
+{
+	uint32_t num_requests = 32; /* pool size; enough for the deepest split in these tests */
+	uint32_t i;
+
+	ctrlr->max_xfer_size = max_xfer_size;
+	/*
+	 * Clear the flags field - we especially want to make sure the SGL_SUPPORTED flag is not set
+	 * so that we test the SGL splitting path.
+	 */
+	ctrlr->flags = 0;
+	ctrlr->min_page_size = 4096;
+	ctrlr->page_size = 4096;
+	memset(&ctrlr->opts, 0, sizeof(ctrlr->opts));
+	memset(ns, 0, sizeof(*ns));
+	ns->ctrlr = ctrlr;
+	ns->sector_size = sector_size;
+	ns->extended_lba_size = sector_size;
+	if (extended_lba) {
+		ns->flags |= SPDK_NVME_NS_EXTENDED_LBA_SUPPORTED; /* metadata interleaved with data */
+		ns->extended_lba_size += md_size; /* each block carries data + metadata bytes */
+	}
+	ns->md_size = md_size;
+	ns->sectors_per_max_io = spdk_nvme_ns_get_max_io_xfer_size(ns) / ns->extended_lba_size;
+	ns->sectors_per_stripe = stripe_size / ns->extended_lba_size; /* 0 when striping disabled */
+
+	memset(qpair, 0, sizeof(*qpair));
+	qpair->ctrlr = ctrlr;
+	qpair->req_buf = calloc(num_requests, sizeof(struct nvme_request));
+	SPDK_CU_ASSERT_FATAL(qpair->req_buf != NULL);
+
+	for (i = 0; i < num_requests; i++) {
+		struct nvme_request *req = qpair->req_buf + i * sizeof(struct nvme_request); /* NOTE(review): byte-offset arithmetic — assumes req_buf is void*/char*; confirm against struct spdk_nvme_qpair */
+
+		req->qpair = qpair;
+		STAILQ_INSERT_HEAD(&qpair->free_req, req, stailq); /* seed the free-request pool */
+	}
+
+	g_request = NULL; /* reset the capture slot before the test runs */
+}
+
+static void
+cleanup_after_test(struct spdk_nvme_qpair *qpair) /* release the request pool allocated by prepare_for_test() */
+{
+	free(qpair->req_buf);
+}
+
+static void
+nvme_cmd_interpret_rw(const struct spdk_nvme_cmd *cmd,
+		      uint64_t *lba, uint32_t *num_blocks) /* decode an NVMe read/write command's LBA fields */
+{
+	*lba = *(const uint64_t *)&cmd->cdw10; /* cdw10/cdw11 together hold the 64-bit starting LBA */
+	*num_blocks = (cmd->cdw12 & 0xFFFFu) + 1; /* low 16 bits of cdw12 are the zero-based block count */
+}
+
<parameter>+static void
+split_test(void) /* single-sector read well under max_xfer_size: must NOT be split */
+{
+	struct spdk_nvme_ns ns;
+	struct spdk_nvme_qpair qpair;
+	struct spdk_nvme_ctrlr ctrlr;
+	void *payload;
+	uint64_t lba, cmd_lba;
+	uint32_t lba_count, cmd_lba_count;
+	int rc;
+
+	prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+	payload = malloc(512);
+	lba = 0;
+	lba_count = 1;
+
+	rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL, 0);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL); /* captured by the nvme_qpair_submit_request stub */
+
+	CU_ASSERT(g_request->num_children == 0); /* no splitting for a 1-block I/O */
+	nvme_cmd_interpret_rw(&g_request->cmd, &cmd_lba, &cmd_lba_count);
+	CU_ASSERT(cmd_lba == lba);
+	CU_ASSERT(cmd_lba_count == lba_count); /* LBA and count pass straight through */
+
+	free(payload);
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+}
</parameter>
+
+static void
+split_test2(void) /* aligned 256 KB read split into two max-sized (128 KB) children */
+{
+	struct spdk_nvme_ns ns;
+	struct spdk_nvme_ctrlr ctrlr;
+	struct spdk_nvme_qpair qpair;
+	struct nvme_request *child;
+	void *payload;
+	uint64_t lba, cmd_lba;
+	uint32_t lba_count, cmd_lba_count;
+	int rc;
+
+	/*
+	 * Controller has max xfer of 128 KB (256 blocks).
+	 * Submit an I/O of 256 KB starting at LBA 0, which should be split
+	 * on the max I/O boundary into two I/Os of 128 KB.
+	 */
+
+	prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+	payload = malloc(256 * 1024);
+	lba = 0;
+	lba_count = (256 * 1024) / 512;
+
+	rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL, 0);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+
+	CU_ASSERT(g_request->num_children == 2);
+
+	child = TAILQ_FIRST(&g_request->children);
+	nvme_request_remove_child(g_request, child); /* detach so the next TAILQ_FIRST yields the second child */
+	nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
+	CU_ASSERT(child->num_children == 0);
+	CU_ASSERT(child->payload_size == 128 * 1024);
+	CU_ASSERT(cmd_lba == 0);
+	CU_ASSERT(cmd_lba_count == 256); /* 256 * 512 byte blocks = 128 KB */
+	nvme_free_request(child);
+
+	child = TAILQ_FIRST(&g_request->children);
+	nvme_request_remove_child(g_request, child);
+	nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
+	CU_ASSERT(child->num_children == 0);
+	CU_ASSERT(child->payload_size == 128 * 1024);
+	CU_ASSERT(cmd_lba == 256); /* second half starts right after the first 256 blocks */
+	CU_ASSERT(cmd_lba_count == 256);
+	nvme_free_request(child);
+
+	CU_ASSERT(TAILQ_EMPTY(&g_request->children)); /* both children consumed */
+
+	free(payload);
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+}
+
+static void
+split_test3(void) /* unaligned start LBA, no striping: split only on the max-xfer boundary */
+{
+	struct spdk_nvme_ns ns;
+	struct spdk_nvme_ctrlr ctrlr;
+	struct spdk_nvme_qpair qpair;
+	struct nvme_request *child;
+	void *payload;
+	uint64_t lba, cmd_lba;
+	uint32_t lba_count, cmd_lba_count;
+	int rc;
+
+	/*
+	 * Controller has max xfer of 128 KB (256 blocks).
+	 * Submit an I/O of 256 KB starting at LBA 10, which should be split
+	 * into two I/Os:
+	 * 1) LBA = 10, count = 256 blocks
+	 * 2) LBA = 266, count = 256 blocks
+	 */
+
+	prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+	payload = malloc(256 * 1024);
+	lba = 10; /* Start at an LBA that isn't aligned to the stripe size */
+	lba_count = (256 * 1024) / 512;
+
+	rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL, 0);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+
+	SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+
+	child = TAILQ_FIRST(&g_request->children);
+	nvme_request_remove_child(g_request, child);
+	nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
+	CU_ASSERT(child->num_children == 0);
+	CU_ASSERT(child->payload_size == 128 * 1024);
+	CU_ASSERT(cmd_lba == 10); /* without striping the start LBA is not rounded */
+	CU_ASSERT(cmd_lba_count == 256);
+	nvme_free_request(child);
+
+	child = TAILQ_FIRST(&g_request->children);
+	nvme_request_remove_child(g_request, child);
+	nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
+	CU_ASSERT(child->num_children == 0);
+	CU_ASSERT(child->payload_size == 128 * 1024);
+	CU_ASSERT(cmd_lba == 266); /* 10 + 256 */
+	CU_ASSERT(cmd_lba_count == 256);
+	nvme_free_request(child);
+
+	CU_ASSERT(TAILQ_EMPTY(&g_request->children));
+
+	free(payload);
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+}
+
+static void
+split_test4(void) /* same as split_test3 but with driver-assisted striping: splits align to stripes */
+{
+	struct spdk_nvme_ns ns;
+	struct spdk_nvme_ctrlr ctrlr;
+	struct spdk_nvme_qpair qpair;
+	struct nvme_request *child;
+	void *payload;
+	uint64_t lba, cmd_lba;
+	uint32_t lba_count, cmd_lba_count;
+	int rc;
+
+	/*
+	 * Controller has max xfer of 128 KB (256 blocks) and a stripe size of 128 KB.
+	 * (Same as split_test3 except with driver-assisted striping enabled.)
+	 * Submit an I/O of 256 KB starting at LBA 10, which should be split
+	 * into three I/Os:
+	 * 1) LBA = 10, count = 246 blocks (less than max I/O size to align to stripe size)
+	 * 2) LBA = 256, count = 256 blocks (aligned to stripe size and max I/O size)
+	 * 3) LBA = 512, count = 10 blocks (finish off the remaining I/O size)
+	 */
+
+	prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 128 * 1024, false);
+	payload = malloc(256 * 1024);
+	lba = 10; /* Start at an LBA that isn't aligned to the stripe size */
+	lba_count = (256 * 1024) / 512;
+
+	rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL,
+				   SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS); /* FUA flag must be copied to every child */
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+
+	SPDK_CU_ASSERT_FATAL(g_request->num_children == 3);
+
+	child = TAILQ_FIRST(&g_request->children);
+	nvme_request_remove_child(g_request, child);
+	nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
+	CU_ASSERT(child->num_children == 0);
+	CU_ASSERT(child->payload_size == (256 - 10) * 512);
+	CU_ASSERT(child->payload_offset == 0);
+	CU_ASSERT(cmd_lba == 10);
+	CU_ASSERT(cmd_lba_count == 256 - 10); /* shortened so the next child starts on a stripe boundary */
+	CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS) != 0);
+	CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_LIMITED_RETRY) == 0); /* unrequested flags stay clear */
+	nvme_free_request(child);
+
+	child = TAILQ_FIRST(&g_request->children);
+	nvme_request_remove_child(g_request, child);
+	nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
+	CU_ASSERT(child->num_children == 0);
+	CU_ASSERT(child->payload_size == 128 * 1024);
+	CU_ASSERT(child->payload_offset == (256 - 10) * 512); /* continues where child 0's payload ended */
+	CU_ASSERT(cmd_lba == 256);
+	CU_ASSERT(cmd_lba_count == 256);
+	CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS) != 0);
+	CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_LIMITED_RETRY) == 0);
+	nvme_free_request(child);
+
+	child = TAILQ_FIRST(&g_request->children);
+	nvme_request_remove_child(g_request, child);
+	nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
+	CU_ASSERT(child->num_children == 0);
+	CU_ASSERT(child->payload_size == 10 * 512);
+	CU_ASSERT(child->payload_offset == (512 - 10) * 512); /* (246 + 256) blocks already covered */
+	CU_ASSERT(cmd_lba == 512);
+	CU_ASSERT(cmd_lba_count == 10); /* tail remainder */
+	CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS) != 0);
+	CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_LIMITED_RETRY) == 0);
+	nvme_free_request(child);
+
+	CU_ASSERT(TAILQ_EMPTY(&g_request->children));
+
+	free(payload);
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+}
+
+static void
+test_cmd_child_request(void) /* child-request bookkeeping: counts, offsets, per-child cdw fields */
+{
+
+	struct spdk_nvme_ns ns;
+	struct spdk_nvme_ctrlr ctrlr;
+	struct spdk_nvme_qpair qpair;
+	int rc = 0;
+	struct nvme_request *child, *tmp;
+	void *payload;
+	uint64_t lba = 0x1000;
+	uint32_t i = 0;
+	uint32_t offset = 0;
+	uint32_t sector_size = 512;
+	uint32_t max_io_size = 128 * 1024;
+	uint32_t sectors_per_max_io = max_io_size / sector_size;
+
+	prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_io_size, 0, false);
+
+	payload = malloc(128 * 1024);
+	rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, sectors_per_max_io, NULL, NULL, 0);
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	CU_ASSERT(g_request->payload_offset == 0);
+	CU_ASSERT(g_request->num_children == 0); /* exactly max size: no split */
+	nvme_free_request(g_request);
+
+	rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, sectors_per_max_io - 1, NULL, NULL, 0);
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	CU_ASSERT(g_request->payload_offset == 0);
+	CU_ASSERT(g_request->num_children == 0); /* just under max size: still no split */
+	nvme_free_request(g_request);
+
+	rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, sectors_per_max_io * 4, NULL, NULL, 0);
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	CU_ASSERT(g_request->num_children == 4); /* 4x max size: four children */
+
+	rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, (DEFAULT_IO_QUEUE_REQUESTS + 1) * sector_size,
+				   NULL,
+				   NULL, 0); /* would need more child requests than the queue allows */
+	SPDK_CU_ASSERT_FATAL(rc == -EINVAL); /* fails before submission, so g_request still holds the 4-child request */
+
+	TAILQ_FOREACH_SAFE(child, &g_request->children, child_tailq, tmp) {
+		nvme_request_remove_child(g_request, child);
+		CU_ASSERT(child->payload_offset == offset);
+		CU_ASSERT(child->cmd.opc == SPDK_NVME_OPC_READ);
+		CU_ASSERT(child->cmd.nsid == ns.id);
+		CU_ASSERT(child->cmd.cdw10 == (lba + sectors_per_max_io * i)); /* each child advances by one max I/O */
+		CU_ASSERT(child->cmd.cdw12 == ((sectors_per_max_io - 1) | 0)); /* zero-based block count, no flags */
+		offset += max_io_size;
+		nvme_free_request(child);
+		i++;
+	}
+
+	free(payload);
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ns_cmd_flush(void) /* flush command: correct opcode and namespace ID */
+{
+	struct spdk_nvme_ns ns;
+	struct spdk_nvme_ctrlr ctrlr;
+	struct spdk_nvme_qpair qpair;
+	spdk_nvme_cmd_cb cb_fn = NULL;
+	void *cb_arg = NULL;
+	int rc;
+
+	prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+
+	rc = spdk_nvme_ns_cmd_flush(&ns, &qpair, cb_fn, cb_arg);
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_FLUSH);
+	CU_ASSERT(g_request->cmd.nsid == ns.id);
+
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ns_cmd_write_zeroes(void) /* write-zeroes command: opcode, nsid, LBA, and block count */
+{
+	struct spdk_nvme_ns ns = { 0 };
+	struct spdk_nvme_ctrlr ctrlr = { 0 };
+	struct spdk_nvme_qpair qpair;
+	spdk_nvme_cmd_cb cb_fn = NULL;
+	void *cb_arg = NULL;
+	uint64_t cmd_lba;
+	uint32_t cmd_lba_count;
+	int rc;
+
+	prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+
+	rc = spdk_nvme_ns_cmd_write_zeroes(&ns, &qpair, 0, 2, cb_fn, cb_arg, 0); /* 2 blocks at LBA 0 */
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_WRITE_ZEROES);
+	CU_ASSERT(g_request->cmd.nsid == ns.id);
+	nvme_cmd_interpret_rw(&g_request->cmd, &cmd_lba, &cmd_lba_count);
+	CU_ASSERT_EQUAL(cmd_lba, 0);
+	CU_ASSERT_EQUAL(cmd_lba_count, 2);
+
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ns_cmd_write_uncorrectable(void) /* write-uncorrectable command: opcode, nsid, LBA, and block count */
+{
+	struct spdk_nvme_ns ns = { 0 };
+	struct spdk_nvme_ctrlr ctrlr = { 0 };
+	struct spdk_nvme_qpair qpair;
+	spdk_nvme_cmd_cb cb_fn = NULL;
+	void *cb_arg = NULL;
+	uint64_t cmd_lba;
+	uint32_t cmd_lba_count;
+	int rc;
+
+	prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+
+	rc = spdk_nvme_ns_cmd_write_uncorrectable(&ns, &qpair, 0, 2, cb_fn, cb_arg); /* 2 blocks at LBA 0 */
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_WRITE_UNCORRECTABLE);
+	CU_ASSERT(g_request->cmd.nsid == ns.id);
+	nvme_cmd_interpret_rw(&g_request->cmd, &cmd_lba, &cmd_lba_count);
+	CU_ASSERT_EQUAL(cmd_lba, 0);
+	CU_ASSERT_EQUAL(cmd_lba_count, 2);
+
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ns_cmd_dataset_management(void) /* DSM (TRIM) command: range count encoding and deallocate attribute */
+{
+	struct spdk_nvme_ns ns;
+	struct spdk_nvme_ctrlr ctrlr;
+	struct spdk_nvme_qpair qpair;
+	spdk_nvme_cmd_cb cb_fn = NULL;
+	void *cb_arg = NULL;
+	struct spdk_nvme_dsm_range ranges[256];
+	uint16_t i;
+	int rc = 0;
+
+	prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+
+	for (i = 0; i < 256; i++) {
+		ranges[i].starting_lba = i; /* one single-block range per LBA */
+		ranges[i].length = 1;
+		ranges[i].attributes.raw = 0;
+	}
+
+	/* TRIM one LBA */
+	rc = spdk_nvme_ns_cmd_dataset_management(&ns, &qpair, SPDK_NVME_DSM_ATTR_DEALLOCATE,
+			ranges, 1, cb_fn, cb_arg);
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_DATASET_MANAGEMENT);
+	CU_ASSERT(g_request->cmd.nsid == ns.id);
+	CU_ASSERT(g_request->cmd.cdw10 == 0); /* cdw10 holds the zero-based range count: 1 range -> 0 */
+	CU_ASSERT(g_request->cmd.cdw11_bits.dsm.ad == 1); /* deallocate attribute set */
+	spdk_free(g_request->payload.contig_or_cb_arg); /* DSM allocates an internal copy of the range list */
+	nvme_free_request(g_request);
+
+	/* TRIM 256 LBAs */
+	rc = spdk_nvme_ns_cmd_dataset_management(&ns, &qpair, SPDK_NVME_DSM_ATTR_DEALLOCATE,
+			ranges, 256, cb_fn, cb_arg);
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_DATASET_MANAGEMENT);
+	CU_ASSERT(g_request->cmd.nsid == ns.id);
+	CU_ASSERT(g_request->cmd.cdw10 == 255u); /* 256 ranges -> zero-based 255 */
+	CU_ASSERT(g_request->cmd.cdw11_bits.dsm.ad == 1);
+	spdk_free(g_request->payload.contig_or_cb_arg);
+	nvme_free_request(g_request);
+
+	rc = spdk_nvme_ns_cmd_dataset_management(&ns, &qpair, SPDK_NVME_DSM_ATTR_DEALLOCATE,
+			NULL, 0, cb_fn, cb_arg);
+	CU_ASSERT(rc != 0); /* zero ranges is rejected */
+	cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ns_cmd_readv(void) /* vectored read: SGL payload wiring plus NULL-callback rejection */
+{
+	struct spdk_nvme_ns ns;
+	struct spdk_nvme_ctrlr ctrlr;
+	struct spdk_nvme_qpair qpair;
+	int rc = 0;
+	void *cb_arg;
+	uint32_t lba_count = 256;
+	uint32_t sector_size = 512;
+	uint64_t sge_length = lba_count * sector_size;
+
+	cb_arg = malloc(512);
+	prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, 128 * 1024, 0, false);
+	rc = spdk_nvme_ns_cmd_readv(&ns, &qpair, 0x1000, lba_count, NULL, &sge_length, 0,
+				    nvme_request_reset_sgl, nvme_request_next_sge);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_READ);
+	CU_ASSERT(nvme_payload_type(&g_request->payload) == NVME_PAYLOAD_TYPE_SGL); /* SGL, not contiguous */
+	CU_ASSERT(g_request->payload.reset_sgl_fn == nvme_request_reset_sgl);
+	CU_ASSERT(g_request->payload.next_sge_fn == nvme_request_next_sge);
+	CU_ASSERT(g_request->payload.contig_or_cb_arg == &sge_length); /* cb_arg is carried through unchanged */
+	CU_ASSERT(g_request->cmd.nsid == ns.id);
+
+	rc = spdk_nvme_ns_cmd_readv(&ns, &qpair, 0x1000, 256, NULL, cb_arg, 0, nvme_request_reset_sgl,
+				    NULL); /* missing next_sge_fn must be rejected */
+	CU_ASSERT(rc != 0);
+
+	free(cb_arg);
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ns_cmd_writev(void) /* vectored write: SGL payload wiring plus NULL-callback rejection */
+{
+	struct spdk_nvme_ns ns;
+	struct spdk_nvme_ctrlr ctrlr;
+	struct spdk_nvme_qpair qpair;
+	int rc = 0;
+	void *cb_arg;
+	uint32_t lba_count = 256;
+	uint32_t sector_size = 512;
+	uint64_t sge_length = lba_count * sector_size;
+
+	cb_arg = malloc(512);
+	prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, 128 * 1024, 0, false);
+	rc = spdk_nvme_ns_cmd_writev(&ns, &qpair, 0x1000, lba_count, NULL, &sge_length, 0,
+				     nvme_request_reset_sgl, nvme_request_next_sge);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_WRITE);
+	CU_ASSERT(nvme_payload_type(&g_request->payload) == NVME_PAYLOAD_TYPE_SGL);
+	CU_ASSERT(g_request->payload.reset_sgl_fn == nvme_request_reset_sgl);
+	CU_ASSERT(g_request->payload.next_sge_fn == nvme_request_next_sge);
+	CU_ASSERT(g_request->payload.contig_or_cb_arg == &sge_length);
+	CU_ASSERT(g_request->cmd.nsid == ns.id);
+
+	rc = spdk_nvme_ns_cmd_writev(&ns, &qpair, 0x1000, 256, NULL, cb_arg, 0,
+				     NULL, nvme_request_next_sge); /* missing reset_sgl_fn must be rejected */
+	CU_ASSERT(rc != 0);
+
+	free(cb_arg);
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ns_cmd_comparev(void) /* vectored compare: SGL payload wiring plus NULL-callback rejection */
+{
+	struct spdk_nvme_ns ns;
+	struct spdk_nvme_ctrlr ctrlr;
+	struct spdk_nvme_qpair qpair;
+	int rc = 0;
+	void *cb_arg;
+	uint32_t lba_count = 256;
+	uint32_t sector_size = 512;
+	uint64_t sge_length = lba_count * sector_size;
+
+	cb_arg = malloc(512);
+	prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, 128 * 1024, 0, false);
+	rc = spdk_nvme_ns_cmd_comparev(&ns, &qpair, 0x1000, lba_count, NULL, &sge_length, 0,
+				       nvme_request_reset_sgl, nvme_request_next_sge);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_COMPARE);
+	CU_ASSERT(nvme_payload_type(&g_request->payload) == NVME_PAYLOAD_TYPE_SGL);
+	CU_ASSERT(g_request->payload.reset_sgl_fn == nvme_request_reset_sgl);
+	CU_ASSERT(g_request->payload.next_sge_fn == nvme_request_next_sge);
+	CU_ASSERT(g_request->payload.contig_or_cb_arg == &sge_length);
+	CU_ASSERT(g_request->cmd.nsid == ns.id);
+
+	rc = spdk_nvme_ns_cmd_comparev(&ns, &qpair, 0x1000, 256, NULL, cb_arg, 0,
+				       nvme_request_reset_sgl, NULL); /* missing next_sge_fn must be rejected */
+	CU_ASSERT(rc != 0);
+
+	free(cb_arg);
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+}
+
+/*
+ * Verify spdk_nvme_ns_cmd_comparev_with_md() request construction and child
+ * splitting for both separate-metadata and extended-LBA formats, with and
+ * without protection information + PRACT.
+ */
+static void
+test_nvme_ns_cmd_comparev_with_md(void)
+{
+	struct spdk_nvme_ns ns;
+	struct spdk_nvme_ctrlr ctrlr;
+	struct spdk_nvme_qpair qpair;
+	int rc = 0;
+	char *buffer = NULL;
+	char *metadata = NULL;
+	uint32_t block_size, md_size;
+	struct nvme_request *child0, *child1;
+	uint32_t lba_count = 256;
+	uint32_t sector_size = 512;
+	/* cb_arg for the SGL callbacks: a single SGE covering the whole payload */
+	uint64_t sge_length = lba_count * sector_size;
+
+	block_size = 512;
+	md_size = 128;
+
+	/* NOTE(review): buffer is allocated and freed but never passed to any
+	 * comparev call below - the payload is described via the SGL callbacks
+	 * (payload pointer is NULL, cb_arg is &sge_length). */
+	buffer = malloc((block_size + md_size) * 384);
+	SPDK_CU_ASSERT_FATAL(buffer != NULL);
+	metadata = malloc(md_size * 384);
+	SPDK_CU_ASSERT_FATAL(metadata != NULL);
+
+	/*
+	 * 512 byte data + 128 byte metadata
+	 * Separate metadata buffer
+	 * Max data transfer size 128 KB
+	 * No stripe size
+	 *
+	 * 256 blocks * 512 bytes per block = single 128 KB I/O (no splitting required)
+	 */
+	prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, false);
+
+	rc = spdk_nvme_ns_cmd_comparev_with_md(&ns, &qpair, 0x1000, 256, NULL, &sge_length, 0,
+					       nvme_request_reset_sgl, nvme_request_next_sge, metadata, 0, 0);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+	CU_ASSERT(g_request->payload.md == metadata);
+	CU_ASSERT(g_request->payload_size == 256 * 512);
+
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+
+	/*
+	 * 512 byte data + 128 byte metadata
+	 * Extended LBA
+	 * Max data transfer size 128 KB
+	 * No stripe size
+	 *
+	 * 256 blocks * (512 + 128) bytes per block = two I/Os:
+	 *   child 0: 204 blocks - 204 * (512 + 128) = 127.5 KB
+	 *   child 1: 52 blocks
+	 */
+	prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, true);
+
+	rc = spdk_nvme_ns_cmd_comparev_with_md(&ns, &qpair, 0x1000, 256, NULL, &sge_length, 0,
+					       nvme_request_reset_sgl, nvme_request_next_sge, NULL, 0, 0);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+	child0 = TAILQ_FIRST(&g_request->children);
+
+	SPDK_CU_ASSERT_FATAL(child0 != NULL);
+	CU_ASSERT(child0->payload.md == NULL);
+	CU_ASSERT(child0->payload_offset == 0);
+	CU_ASSERT(child0->payload_size == 204 * (512 + 128));
+	child1 = TAILQ_NEXT(child0, child_tailq);
+
+	SPDK_CU_ASSERT_FATAL(child1 != NULL);
+	CU_ASSERT(child1->payload.md == NULL);
+	CU_ASSERT(child1->payload_offset == 204 * (512 + 128));
+	CU_ASSERT(child1->payload_size == 52 * (512 + 128));
+
+	nvme_request_free_children(g_request);
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+
+	/*
+	 * 512 byte data + 8 byte metadata
+	 * Extended LBA
+	 * Max data transfer size 128 KB
+	 * No stripe size
+	 * No protection information
+	 *
+	 * 256 blocks * (512 + 8) bytes per block = two I/Os:
+	 *   child 0: 252 blocks - 252 * (512 + 8) = 127.96875 KB
+	 *   child 1: 4 blocks
+	 */
+	prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, true);
+
+	rc = spdk_nvme_ns_cmd_comparev_with_md(&ns, &qpair, 0x1000, 256, NULL, &sge_length, 0,
+					       nvme_request_reset_sgl, nvme_request_next_sge, NULL, 0, 0);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+	child0 = TAILQ_FIRST(&g_request->children);
+
+	SPDK_CU_ASSERT_FATAL(child0 != NULL);
+	CU_ASSERT(child0->payload.md == NULL);
+	CU_ASSERT(child0->payload_offset == 0);
+	CU_ASSERT(child0->payload_size == 252 * (512 + 8));
+	child1 = TAILQ_NEXT(child0, child_tailq);
+
+	SPDK_CU_ASSERT_FATAL(child1 != NULL);
+	CU_ASSERT(child1->payload.md == NULL);
+	CU_ASSERT(child1->payload_offset == 252 * (512 + 8));
+	CU_ASSERT(child1->payload_size == 4 * (512 + 8));
+
+	nvme_request_free_children(g_request);
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+
+	/*
+	 * 512 byte data + 8 byte metadata
+	 * Extended LBA
+	 * Max data transfer size 128 KB
+	 * No stripe size
+	 * Protection information enabled + PRACT
+	 *
+	 * Special case for 8-byte metadata + PI + PRACT: no metadata transferred
+	 * In theory, 256 blocks * 512 bytes per block = one I/O (128 KB)
+	 * However, the splitting code does not account for PRACT when calculating
+	 * max sectors per transfer, so we actually get two I/Os:
+	 *   child 0: 252 blocks
+	 *   child 1: 4 blocks
+	 */
+	prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, true);
+	ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;
+
+	rc = spdk_nvme_ns_cmd_comparev_with_md(&ns, &qpair, 0x1000, 256, NULL, &sge_length,
+					       SPDK_NVME_IO_FLAGS_PRACT, nvme_request_reset_sgl, nvme_request_next_sge, NULL, 0, 0);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+	child0 = TAILQ_FIRST(&g_request->children);
+
+	SPDK_CU_ASSERT_FATAL(child0 != NULL);
+	CU_ASSERT(child0->payload_offset == 0);
+	CU_ASSERT(child0->payload_size == 252 * 512); /* NOTE: does not include metadata! */
+	child1 = TAILQ_NEXT(child0, child_tailq);
+
+	SPDK_CU_ASSERT_FATAL(child1 != NULL);
+	CU_ASSERT(child1->payload.md == NULL);
+	CU_ASSERT(child1->payload_offset == 252 * 512);
+	CU_ASSERT(child1->payload_size == 4 * 512);
+
+	nvme_request_free_children(g_request);
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+
+	/*
+	 * 512 byte data + 8 byte metadata
+	 * Separate metadata buffer
+	 * Max data transfer size 128 KB
+	 * No stripe size
+	 * Protection information enabled + PRACT
+	 */
+	prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, false);
+	ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;
+
+	rc = spdk_nvme_ns_cmd_comparev_with_md(&ns, &qpair, 0x1000, 256, NULL, &sge_length,
+					       SPDK_NVME_IO_FLAGS_PRACT, nvme_request_reset_sgl, nvme_request_next_sge, metadata, 0, 0);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+	CU_ASSERT(g_request->payload.md == metadata);
+	CU_ASSERT(g_request->payload_size == 256 * 512);
+
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+
+	/*
+	 * 512 byte data + 8 byte metadata
+	 * Separate metadata buffer
+	 * Max data transfer size 128 KB
+	 * No stripe size
+	 * Protection information enabled + PRACT
+	 *
+	 * 384 blocks * 512 bytes = two I/Os:
+	 *   child 0: 256 blocks
+	 *   child 1: 128 blocks
+	 */
+	prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, false);
+	ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;
+
+	rc = spdk_nvme_ns_cmd_comparev_with_md(&ns, &qpair, 0x1000, 384, NULL, &sge_length,
+					       SPDK_NVME_IO_FLAGS_PRACT, nvme_request_reset_sgl, nvme_request_next_sge, metadata, 0, 0);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+	child0 = TAILQ_FIRST(&g_request->children);
+
+	SPDK_CU_ASSERT_FATAL(child0 != NULL);
+	CU_ASSERT(child0->payload_offset == 0);
+	CU_ASSERT(child0->payload_size == 256 * 512);
+	CU_ASSERT(child0->md_offset == 0);
+	child1 = TAILQ_NEXT(child0, child_tailq);
+
+	SPDK_CU_ASSERT_FATAL(child1 != NULL);
+	CU_ASSERT(child1->payload_offset == 256 * 512);
+	CU_ASSERT(child1->payload_size == 128 * 512);
+	CU_ASSERT(child1->md_offset == 256 * 8);
+
+	nvme_request_free_children(g_request);
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+
+	free(buffer);
+	free(metadata);
+}
+
+/*
+ * Verify fused COMPARE+WRITE command construction: the compare half carries
+ * SPDK_NVME_CMD_FUSE_FIRST and the write half SPDK_NVME_CMD_FUSE_SECOND,
+ * with matching opcode, nsid, LBA and LBA count in both commands.
+ */
+static void
+test_nvme_ns_cmd_compare_and_write(void)
+{
+	struct spdk_nvme_ns ns;
+	struct spdk_nvme_ctrlr ctrlr;
+	struct spdk_nvme_qpair qpair;
+	int rc = 0;
+	uint64_t lba = 0x1000;
+	uint32_t lba_count = 256;
+	uint64_t cmd_lba;
+	uint32_t cmd_lba_count;
+	uint32_t sector_size = 512;
+
+	prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, 128 * 1024, 0, false);
+
+	/* First fused command: COMPARE with the FUSE_FIRST flag */
+	rc = spdk_nvme_ns_cmd_compare(&ns, &qpair, NULL, lba, lba_count, NULL, NULL,
+				      SPDK_NVME_IO_FLAGS_FUSE_FIRST);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_COMPARE);
+	CU_ASSERT(g_request->cmd.fuse == SPDK_NVME_CMD_FUSE_FIRST);
+	CU_ASSERT(g_request->cmd.nsid == ns.id);
+
+	nvme_cmd_interpret_rw(&g_request->cmd, &cmd_lba, &cmd_lba_count);
+	CU_ASSERT_EQUAL(cmd_lba, lba);
+	CU_ASSERT_EQUAL(cmd_lba_count, lba_count);
+
+	nvme_free_request(g_request);
+
+	/* Second fused command: WRITE with the FUSE_SECOND flag */
+	rc = spdk_nvme_ns_cmd_write(&ns, &qpair, NULL, lba, lba_count, NULL, NULL,
+				    SPDK_NVME_IO_FLAGS_FUSE_SECOND);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_WRITE);
+	CU_ASSERT(g_request->cmd.fuse == SPDK_NVME_CMD_FUSE_SECOND);
+	CU_ASSERT(g_request->cmd.nsid == ns.id);
+	nvme_cmd_interpret_rw(&g_request->cmd, &cmd_lba, &cmd_lba_count);
+	CU_ASSERT_EQUAL(cmd_lba, lba);
+	CU_ASSERT_EQUAL(cmd_lba_count, lba_count);
+
+	nvme_free_request(g_request);
+
+	cleanup_after_test(&qpair);
+}
+
+/*
+ * Verify that I/O flags are propagated into cdw12 (FUA, LIMITED_RETRY, the
+ * full CDW12 mask) and that flags outside SPDK_NVME_IO_FLAGS_VALID_MASK are
+ * rejected with -EINVAL.
+ */
+static void
+test_io_flags(void)
+{
+	struct spdk_nvme_ns ns;
+	struct spdk_nvme_ctrlr ctrlr;
+	struct spdk_nvme_qpair qpair;
+	void *payload;
+	uint64_t lba;
+	uint32_t lba_count;
+	uint64_t cmd_lba;
+	uint32_t cmd_lba_count;
+	int rc;
+
+	prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 128 * 1024, false);
+	payload = malloc(256 * 1024);
+	/* Guard the allocation like the other tests in this file do. */
+	SPDK_CU_ASSERT_FATAL(payload != NULL);
+	lba = 0;
+	lba_count = (4 * 1024) / 512;
+
+	/* FUA set, LIMITED_RETRY clear */
+	rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL,
+				   SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS);
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	CU_ASSERT((g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS) != 0);
+	CU_ASSERT((g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_LIMITED_RETRY) == 0);
+	nvme_free_request(g_request);
+
+	/* LIMITED_RETRY set, FUA clear */
+	rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL,
+				   SPDK_NVME_IO_FLAGS_LIMITED_RETRY);
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	CU_ASSERT((g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS) == 0);
+	CU_ASSERT((g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_LIMITED_RETRY) != 0);
+	nvme_free_request(g_request);
+
+	/* All valid flags at once: the CDW12 portion must survive intact */
+	rc = spdk_nvme_ns_cmd_write(&ns, &qpair, payload, lba, lba_count, NULL, NULL,
+				    SPDK_NVME_IO_FLAGS_VALID_MASK);
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	nvme_cmd_interpret_rw(&g_request->cmd, &cmd_lba, &cmd_lba_count);
+	CU_ASSERT_EQUAL(cmd_lba_count, lba_count);
+	CU_ASSERT_EQUAL(cmd_lba, lba);
+	CU_ASSERT_EQUAL(g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_CDW12_MASK,
+			SPDK_NVME_IO_FLAGS_CDW12_MASK);
+	nvme_free_request(g_request);
+
+	/* Any flag outside the valid mask must be rejected */
+	rc = spdk_nvme_ns_cmd_write(&ns, &qpair, payload, lba, lba_count, NULL, NULL,
+				    ~SPDK_NVME_IO_FLAGS_VALID_MASK);
+	CU_ASSERT(rc == -EINVAL);
+
+	free(payload);
+	cleanup_after_test(&qpair);
+}
+
+/*
+ * Verify that spdk_nvme_ns_cmd_reservation_register() builds a
+ * RESERVATION REGISTER command with the expected opcode, nsid and cdw10
+ * encoding (action, IEKEY bit 3, CPTPL in bits 31:30).
+ */
+static void
+test_nvme_ns_cmd_reservation_register(void)
+{
+	struct spdk_nvme_ns ns;
+	struct spdk_nvme_ctrlr ctrlr;
+	struct spdk_nvme_qpair qpair;
+	struct spdk_nvme_reservation_register_data *payload;
+	bool ignore_key = 1;
+	spdk_nvme_cmd_cb cb_fn = NULL;
+	void *cb_arg = NULL;
+	int rc = 0;
+	uint32_t tmp_cdw10;
+
+	prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+	payload = malloc(sizeof(struct spdk_nvme_reservation_register_data));
+	/* Guard the allocation like the other tests in this file do. */
+	SPDK_CU_ASSERT_FATAL(payload != NULL);
+
+	rc = spdk_nvme_ns_cmd_reservation_register(&ns, &qpair, payload, ignore_key,
+			SPDK_NVME_RESERVE_REGISTER_KEY,
+			SPDK_NVME_RESERVE_PTPL_NO_CHANGES,
+			cb_fn, cb_arg);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_RESERVATION_REGISTER);
+	CU_ASSERT(g_request->cmd.nsid == ns.id);
+
+	/* Rebuild the expected cdw10: action, IEKEY (bit 3), CPTPL (bits 31:30) */
+	tmp_cdw10 = SPDK_NVME_RESERVE_REGISTER_KEY;
+	tmp_cdw10 |= ignore_key ? 1 << 3 : 0;
+	tmp_cdw10 |= (uint32_t)SPDK_NVME_RESERVE_PTPL_NO_CHANGES << 30;
+
+	CU_ASSERT(g_request->cmd.cdw10 == tmp_cdw10);
+
+	spdk_free(g_request->payload.contig_or_cb_arg);
+	nvme_free_request(g_request);
+	free(payload);
+	cleanup_after_test(&qpair);
+}
+
+/*
+ * Verify that spdk_nvme_ns_cmd_reservation_release() builds a
+ * RESERVATION RELEASE command with the expected opcode, nsid and cdw10
+ * encoding (action, IEKEY bit 3, reservation type in bits 15:8).
+ */
+static void
+test_nvme_ns_cmd_reservation_release(void)
+{
+	struct spdk_nvme_ns ns;
+	struct spdk_nvme_ctrlr ctrlr;
+	struct spdk_nvme_qpair qpair;
+	struct spdk_nvme_reservation_key_data *payload;
+	bool ignore_key = 1;
+	spdk_nvme_cmd_cb cb_fn = NULL;
+	void *cb_arg = NULL;
+	int rc = 0;
+	uint32_t tmp_cdw10;
+
+	prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+	payload = malloc(sizeof(struct spdk_nvme_reservation_key_data));
+	/* Guard the allocation like the other tests in this file do. */
+	SPDK_CU_ASSERT_FATAL(payload != NULL);
+
+	rc = spdk_nvme_ns_cmd_reservation_release(&ns, &qpair, payload, ignore_key,
+			SPDK_NVME_RESERVE_RELEASE,
+			SPDK_NVME_RESERVE_WRITE_EXCLUSIVE,
+			cb_fn, cb_arg);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_RESERVATION_RELEASE);
+	CU_ASSERT(g_request->cmd.nsid == ns.id);
+
+	/* Rebuild the expected cdw10: action, IEKEY (bit 3), type (bits 15:8) */
+	tmp_cdw10 = SPDK_NVME_RESERVE_RELEASE;
+	tmp_cdw10 |= ignore_key ? 1 << 3 : 0;
+	tmp_cdw10 |= (uint32_t)SPDK_NVME_RESERVE_WRITE_EXCLUSIVE << 8;
+
+	CU_ASSERT(g_request->cmd.cdw10 == tmp_cdw10);
+
+	spdk_free(g_request->payload.contig_or_cb_arg);
+	nvme_free_request(g_request);
+	free(payload);
+	cleanup_after_test(&qpair);
+}
+
+/*
+ * Verify that spdk_nvme_ns_cmd_reservation_acquire() builds a
+ * RESERVATION ACQUIRE command with the expected opcode, nsid and cdw10
+ * encoding (action, IEKEY bit 3, reservation type in bits 15:8).
+ */
+static void
+test_nvme_ns_cmd_reservation_acquire(void)
+{
+	struct spdk_nvme_ns ns;
+	struct spdk_nvme_ctrlr ctrlr;
+	struct spdk_nvme_qpair qpair;
+	struct spdk_nvme_reservation_acquire_data *payload;
+	bool ignore_key = 1;
+	spdk_nvme_cmd_cb cb_fn = NULL;
+	void *cb_arg = NULL;
+	int rc = 0;
+	uint32_t tmp_cdw10;
+
+	prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+	payload = malloc(sizeof(struct spdk_nvme_reservation_acquire_data));
+	/* Guard the allocation like the other tests in this file do. */
+	SPDK_CU_ASSERT_FATAL(payload != NULL);
+
+	rc = spdk_nvme_ns_cmd_reservation_acquire(&ns, &qpair, payload, ignore_key,
+			SPDK_NVME_RESERVE_ACQUIRE,
+			SPDK_NVME_RESERVE_WRITE_EXCLUSIVE,
+			cb_fn, cb_arg);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_RESERVATION_ACQUIRE);
+	CU_ASSERT(g_request->cmd.nsid == ns.id);
+
+	/* Rebuild the expected cdw10: action, IEKEY (bit 3), type (bits 15:8) */
+	tmp_cdw10 = SPDK_NVME_RESERVE_ACQUIRE;
+	tmp_cdw10 |= ignore_key ? 1 << 3 : 0;
+	tmp_cdw10 |= (uint32_t)SPDK_NVME_RESERVE_WRITE_EXCLUSIVE << 8;
+
+	CU_ASSERT(g_request->cmd.cdw10 == tmp_cdw10);
+
+	spdk_free(g_request->payload.contig_or_cb_arg);
+	nvme_free_request(g_request);
+	free(payload);
+	cleanup_after_test(&qpair);
+}
+
+/*
+ * Verify that spdk_nvme_ns_cmd_reservation_report() builds a
+ * RESERVATION REPORT command with the expected opcode, nsid, and
+ * cdw10 set to the payload size in dwords.
+ */
+static void
+test_nvme_ns_cmd_reservation_report(void)
+{
+	struct spdk_nvme_ns ns;
+	struct spdk_nvme_ctrlr ctrlr;
+	struct spdk_nvme_qpair qpair;
+	struct spdk_nvme_reservation_status_data *payload;
+	spdk_nvme_cmd_cb cb_fn = NULL;
+	void *cb_arg = NULL;
+	int rc = 0;
+	uint32_t size = sizeof(struct spdk_nvme_reservation_status_data);
+
+	prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+
+	payload = calloc(1, size);
+	SPDK_CU_ASSERT_FATAL(payload != NULL);
+
+	rc = spdk_nvme_ns_cmd_reservation_report(&ns, &qpair, payload, size, cb_fn, cb_arg);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_RESERVATION_REPORT);
+	CU_ASSERT(g_request->cmd.nsid == ns.id);
+
+	/* cdw10 carries the number of dwords to transfer */
+	CU_ASSERT(g_request->cmd.cdw10 == (size / 4));
+
+	spdk_free(g_request->payload.contig_or_cb_arg);
+	nvme_free_request(g_request);
+	free(payload);
+	cleanup_after_test(&qpair);
+}
+
+/*
+ * Verify spdk_nvme_ns_cmd_write_with_md() request construction and child
+ * splitting for separate-metadata and extended-LBA formats, with and without
+ * protection information + PRACT, including md_size/md_offset propagation.
+ */
+static void
+test_nvme_ns_cmd_write_with_md(void)
+{
+	struct spdk_nvme_ns ns;
+	struct spdk_nvme_ctrlr ctrlr;
+	struct spdk_nvme_qpair qpair;
+	int rc = 0;
+	char *buffer = NULL;
+	char *metadata = NULL;
+	uint32_t block_size, md_size;
+	struct nvme_request *child0, *child1;
+
+	block_size = 512;
+	md_size = 128;
+
+	/* Sized for the largest case below: 384 blocks */
+	buffer = malloc((block_size + md_size) * 384);
+	SPDK_CU_ASSERT_FATAL(buffer != NULL);
+	metadata = malloc(md_size * 384);
+	SPDK_CU_ASSERT_FATAL(metadata != NULL);
+
+	/*
+	 * 512 byte data + 128 byte metadata
+	 * Separate metadata buffer
+	 * Max data transfer size 128 KB
+	 * No stripe size
+	 *
+	 * 256 blocks * 512 bytes per block = single 128 KB I/O (no splitting required)
+	 */
+	prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, false);
+
+	rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, metadata, 0x1000, 256, NULL, NULL, 0, 0,
+					    0);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+	CU_ASSERT(g_request->payload.md == metadata);
+	CU_ASSERT(g_request->md_size == 256 * 128);
+	CU_ASSERT(g_request->payload_size == 256 * 512);
+
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+
+	/*
+	 * 512 byte data + 128 byte metadata
+	 * Extended LBA
+	 * Max data transfer size 128 KB
+	 * No stripe size
+	 *
+	 * 256 blocks * (512 + 128) bytes per block = two I/Os:
+	 *   child 0: 204 blocks - 204 * (512 + 128) = 127.5 KB
+	 *   child 1: 52 blocks
+	 */
+	prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, true);
+
+	rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256, NULL, NULL, 0, 0,
+					    0);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+	child0 = TAILQ_FIRST(&g_request->children);
+
+	SPDK_CU_ASSERT_FATAL(child0 != NULL);
+	CU_ASSERT(child0->payload.md == NULL);
+	CU_ASSERT(child0->payload_offset == 0);
+	CU_ASSERT(child0->payload_size == 204 * (512 + 128));
+	child1 = TAILQ_NEXT(child0, child_tailq);
+
+	SPDK_CU_ASSERT_FATAL(child1 != NULL);
+	CU_ASSERT(child1->payload.md == NULL);
+	CU_ASSERT(child1->payload_offset == 204 * (512 + 128));
+	CU_ASSERT(child1->payload_size == 52 * (512 + 128));
+
+	nvme_request_free_children(g_request);
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+
+	/*
+	 * 512 byte data + 8 byte metadata
+	 * Extended LBA
+	 * Max data transfer size 128 KB
+	 * No stripe size
+	 * No protection information
+	 *
+	 * 256 blocks * (512 + 8) bytes per block = two I/Os:
+	 *   child 0: 252 blocks - 252 * (512 + 8) = 127.96875 KB
+	 *   child 1: 4 blocks
+	 */
+	prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, true);
+
+	rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256, NULL, NULL, 0, 0,
+					    0);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+	child0 = TAILQ_FIRST(&g_request->children);
+
+	SPDK_CU_ASSERT_FATAL(child0 != NULL);
+	CU_ASSERT(child0->payload.md == NULL);
+	CU_ASSERT(child0->payload_offset == 0);
+	CU_ASSERT(child0->payload_size == 252 * (512 + 8));
+	child1 = TAILQ_NEXT(child0, child_tailq);
+
+	SPDK_CU_ASSERT_FATAL(child1 != NULL);
+	CU_ASSERT(child1->payload.md == NULL);
+	CU_ASSERT(child1->payload_offset == 252 * (512 + 8));
+	CU_ASSERT(child1->payload_size == 4 * (512 + 8));
+
+	nvme_request_free_children(g_request);
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+
+	/*
+	 * 512 byte data + 8 byte metadata
+	 * Extended LBA
+	 * Max data transfer size 128 KB
+	 * No stripe size
+	 * Protection information enabled + PRACT
+	 *
+	 * Special case for 8-byte metadata + PI + PRACT: no metadata transferred
+	 * In theory, 256 blocks * 512 bytes per block = one I/O (128 KB)
+	 * However, the splitting code does not account for PRACT when calculating
+	 * max sectors per transfer, so we actually get two I/Os:
+	 *   child 0: 252 blocks
+	 *   child 1: 4 blocks
+	 */
+	prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, true);
+	ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;
+
+	rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256, NULL, NULL,
+					    SPDK_NVME_IO_FLAGS_PRACT, 0, 0);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+	child0 = TAILQ_FIRST(&g_request->children);
+
+	SPDK_CU_ASSERT_FATAL(child0 != NULL);
+	CU_ASSERT(child0->payload_offset == 0);
+	CU_ASSERT(child0->payload_size == 252 * 512); /* NOTE: does not include metadata! */
+	child1 = TAILQ_NEXT(child0, child_tailq);
+
+	SPDK_CU_ASSERT_FATAL(child1 != NULL);
+	CU_ASSERT(child1->payload.md == NULL);
+	CU_ASSERT(child1->payload_offset == 252 * 512);
+	CU_ASSERT(child1->payload_size == 4 * 512);
+
+	nvme_request_free_children(g_request);
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+
+	/*
+	 * 512 byte data + 8 byte metadata
+	 * Separate metadata buffer
+	 * Max data transfer size 128 KB
+	 * No stripe size
+	 * Protection information enabled + PRACT
+	 */
+	prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, false);
+	ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;
+
+	rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, metadata, 0x1000, 256, NULL, NULL,
+					    SPDK_NVME_IO_FLAGS_PRACT, 0, 0);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+	CU_ASSERT(g_request->payload.md == metadata);
+	CU_ASSERT(g_request->md_size == 256 * 8);
+	CU_ASSERT(g_request->payload_size == 256 * 512);
+
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+
+	/*
+	 * 512 byte data + 8 byte metadata
+	 * Separate metadata buffer
+	 * Max data transfer size 128 KB
+	 * No stripe size
+	 * Protection information enabled + PRACT
+	 *
+	 * 384 blocks * 512 bytes = two I/Os:
+	 *   child 0: 256 blocks
+	 *   child 1: 128 blocks
+	 */
+	prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, false);
+	ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;
+
+	rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, metadata, 0x1000, 384, NULL, NULL,
+					    SPDK_NVME_IO_FLAGS_PRACT, 0, 0);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+	child0 = TAILQ_FIRST(&g_request->children);
+
+	SPDK_CU_ASSERT_FATAL(child0 != NULL);
+	CU_ASSERT(child0->payload_offset == 0);
+	CU_ASSERT(child0->payload_size == 256 * 512);
+	CU_ASSERT(child0->md_offset == 0);
+	CU_ASSERT(child0->md_size == 256 * 8);
+	child1 = TAILQ_NEXT(child0, child_tailq);
+
+	SPDK_CU_ASSERT_FATAL(child1 != NULL);
+	CU_ASSERT(child1->payload_offset == 256 * 512);
+	CU_ASSERT(child1->payload_size == 128 * 512);
+	CU_ASSERT(child1->md_offset == 256 * 8);
+	CU_ASSERT(child1->md_size == 128 * 8);
+
+	nvme_request_free_children(g_request);
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+
+	free(buffer);
+	free(metadata);
+}
+
+/*
+ * Verify spdk_nvme_ns_cmd_read_with_md() for the simple separate-metadata
+ * case: a single unsplit request with the metadata pointer and md_size
+ * carried through.
+ */
+static void
+test_nvme_ns_cmd_read_with_md(void)
+{
+	struct spdk_nvme_ns ns;
+	struct spdk_nvme_ctrlr ctrlr;
+	struct spdk_nvme_qpair qpair;
+	int rc = 0;
+	char *buffer = NULL;
+	char *metadata = NULL;
+	uint32_t block_size, md_size;
+
+	block_size = 512;
+	md_size = 128;
+
+	buffer = malloc(block_size * 256);
+	SPDK_CU_ASSERT_FATAL(buffer != NULL);
+	metadata = malloc(md_size * 256);
+	SPDK_CU_ASSERT_FATAL(metadata != NULL);
+
+	/*
+	 * 512 byte data + 128 byte metadata
+	 * Separate metadata buffer
+	 * Max data transfer size 128 KB
+	 * No stripe size
+	 *
+	 * 256 blocks * 512 bytes per block = single 128 KB I/O (no splitting required)
+	 */
+	prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, false);
+
+	rc = spdk_nvme_ns_cmd_read_with_md(&ns, &qpair, buffer, metadata, 0x1000, 256, NULL, NULL, 0, 0,
+					   0);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+	CU_ASSERT(g_request->payload.md == metadata);
+	CU_ASSERT(g_request->md_size == 256 * md_size);
+	CU_ASSERT(g_request->payload_size == 256 * 512);
+
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+	free(buffer);
+	free(metadata);
+}
+
+/*
+ * Verify spdk_nvme_ns_cmd_compare_with_md() request construction and child
+ * splitting for separate-metadata and extended-LBA formats, with and without
+ * protection information + PRACT. Mirrors test_nvme_ns_cmd_write_with_md().
+ */
+static void
+test_nvme_ns_cmd_compare_with_md(void)
+{
+	struct spdk_nvme_ns ns;
+	struct spdk_nvme_ctrlr ctrlr;
+	struct spdk_nvme_qpair qpair;
+	int rc = 0;
+	char *buffer = NULL;
+	char *metadata = NULL;
+	uint32_t block_size, md_size;
+	struct nvme_request *child0, *child1;
+
+	block_size = 512;
+	md_size = 128;
+
+	/* Sized for the largest case below: 384 blocks */
+	buffer = malloc((block_size + md_size) * 384);
+	SPDK_CU_ASSERT_FATAL(buffer != NULL);
+	metadata = malloc(md_size * 384);
+	SPDK_CU_ASSERT_FATAL(metadata != NULL);
+
+	/*
+	 * 512 byte data + 128 byte metadata
+	 * Separate metadata buffer
+	 * Max data transfer size 128 KB
+	 * No stripe size
+	 *
+	 * 256 blocks * 512 bytes per block = single 128 KB I/O (no splitting required)
+	 */
+	prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, false);
+
+	rc = spdk_nvme_ns_cmd_compare_with_md(&ns, &qpair, buffer, metadata, 0x1000, 256,
+					      NULL, NULL, 0, 0, 0);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+	CU_ASSERT(g_request->payload.md == metadata);
+	CU_ASSERT(g_request->payload_size == 256 * 512);
+
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+
+	/*
+	 * 512 byte data + 128 byte metadata
+	 * Extended LBA
+	 * Max data transfer size 128 KB
+	 * No stripe size
+	 *
+	 * 256 blocks * (512 + 128) bytes per block = two I/Os:
+	 *   child 0: 204 blocks - 204 * (512 + 128) = 127.5 KB
+	 *   child 1: 52 blocks
+	 */
+	prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, true);
+
+	rc = spdk_nvme_ns_cmd_compare_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256,
+					      NULL, NULL, 0, 0, 0);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+	child0 = TAILQ_FIRST(&g_request->children);
+
+	SPDK_CU_ASSERT_FATAL(child0 != NULL);
+	CU_ASSERT(child0->payload.md == NULL);
+	CU_ASSERT(child0->payload_offset == 0);
+	CU_ASSERT(child0->payload_size == 204 * (512 + 128));
+	child1 = TAILQ_NEXT(child0, child_tailq);
+
+	SPDK_CU_ASSERT_FATAL(child1 != NULL);
+	CU_ASSERT(child1->payload.md == NULL);
+	CU_ASSERT(child1->payload_offset == 204 * (512 + 128));
+	CU_ASSERT(child1->payload_size == 52 * (512 + 128));
+
+	nvme_request_free_children(g_request);
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+
+	/*
+	 * 512 byte data + 8 byte metadata
+	 * Extended LBA
+	 * Max data transfer size 128 KB
+	 * No stripe size
+	 * No protection information
+	 *
+	 * 256 blocks * (512 + 8) bytes per block = two I/Os:
+	 *   child 0: 252 blocks - 252 * (512 + 8) = 127.96875 KB
+	 *   child 1: 4 blocks
+	 */
+	prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, true);
+
+	rc = spdk_nvme_ns_cmd_compare_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256,
+					      NULL, NULL, 0, 0, 0);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+	child0 = TAILQ_FIRST(&g_request->children);
+
+	SPDK_CU_ASSERT_FATAL(child0 != NULL);
+	CU_ASSERT(child0->payload.md == NULL);
+	CU_ASSERT(child0->payload_offset == 0);
+	CU_ASSERT(child0->payload_size == 252 * (512 + 8));
+	child1 = TAILQ_NEXT(child0, child_tailq);
+
+	SPDK_CU_ASSERT_FATAL(child1 != NULL);
+	CU_ASSERT(child1->payload.md == NULL);
+	CU_ASSERT(child1->payload_offset == 252 * (512 + 8));
+	CU_ASSERT(child1->payload_size == 4 * (512 + 8));
+
+	nvme_request_free_children(g_request);
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+
+	/*
+	 * 512 byte data + 8 byte metadata
+	 * Extended LBA
+	 * Max data transfer size 128 KB
+	 * No stripe size
+	 * Protection information enabled + PRACT
+	 *
+	 * Special case for 8-byte metadata + PI + PRACT: no metadata transferred
+	 * In theory, 256 blocks * 512 bytes per block = one I/O (128 KB)
+	 * However, the splitting code does not account for PRACT when calculating
+	 * max sectors per transfer, so we actually get two I/Os:
+	 *   child 0: 252 blocks
+	 *   child 1: 4 blocks
+	 */
+	prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, true);
+	ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;
+
+	rc = spdk_nvme_ns_cmd_compare_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256,
+					      NULL, NULL, SPDK_NVME_IO_FLAGS_PRACT, 0, 0);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+	child0 = TAILQ_FIRST(&g_request->children);
+
+	SPDK_CU_ASSERT_FATAL(child0 != NULL);
+	CU_ASSERT(child0->payload_offset == 0);
+	CU_ASSERT(child0->payload_size == 252 * 512); /* NOTE: does not include metadata! */
+	child1 = TAILQ_NEXT(child0, child_tailq);
+
+	SPDK_CU_ASSERT_FATAL(child1 != NULL);
+	CU_ASSERT(child1->payload.md == NULL);
+	CU_ASSERT(child1->payload_offset == 252 * 512);
+	CU_ASSERT(child1->payload_size == 4 * 512);
+
+	nvme_request_free_children(g_request);
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+
+	/*
+	 * 512 byte data + 8 byte metadata
+	 * Separate metadata buffer
+	 * Max data transfer size 128 KB
+	 * No stripe size
+	 * Protection information enabled + PRACT
+	 */
+	prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, false);
+	ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;
+
+	rc = spdk_nvme_ns_cmd_compare_with_md(&ns, &qpair, buffer, metadata, 0x1000, 256,
+					      NULL, NULL, SPDK_NVME_IO_FLAGS_PRACT, 0, 0);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+	CU_ASSERT(g_request->payload.md == metadata);
+	CU_ASSERT(g_request->payload_size == 256 * 512);
+
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+
+	/*
+	 * 512 byte data + 8 byte metadata
+	 * Separate metadata buffer
+	 * Max data transfer size 128 KB
+	 * No stripe size
+	 * Protection information enabled + PRACT
+	 *
+	 * 384 blocks * 512 bytes = two I/Os:
+	 *   child 0: 256 blocks
+	 *   child 1: 128 blocks
+	 */
+	prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, false);
+	ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;
+
+	rc = spdk_nvme_ns_cmd_compare_with_md(&ns, &qpair, buffer, metadata, 0x1000, 384,
+					      NULL, NULL, SPDK_NVME_IO_FLAGS_PRACT, 0, 0);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+	child0 = TAILQ_FIRST(&g_request->children);
+
+	SPDK_CU_ASSERT_FATAL(child0 != NULL);
+	CU_ASSERT(child0->payload_offset == 0);
+	CU_ASSERT(child0->payload_size == 256 * 512);
+	CU_ASSERT(child0->md_offset == 0);
+	child1 = TAILQ_NEXT(child0, child_tailq);
+
+	SPDK_CU_ASSERT_FATAL(child1 != NULL);
+	CU_ASSERT(child1->payload_offset == 256 * 512);
+	CU_ASSERT(child1->payload_size == 128 * 512);
+	CU_ASSERT(child1->md_offset == 256 * 8);
+
+	nvme_request_free_children(g_request);
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+
+	free(buffer);
+	free(metadata);
+}
+
+/* Register all nvme_ns_cmd unit tests with CUnit and run them; the process
+ * exit status is the number of failed assertions. */
+int main(int argc, char **argv)
+{
+	CU_pSuite suite = NULL;
+	unsigned int num_failures;
+
+	CU_set_error_action(CUEA_ABORT);
+	CU_initialize_registry();
+
+	suite = CU_add_suite("nvme_ns_cmd", NULL, NULL);
+
+	CU_ADD_TEST(suite, split_test);
+	CU_ADD_TEST(suite, split_test2);
+	CU_ADD_TEST(suite, split_test3);
+	CU_ADD_TEST(suite, split_test4);
+	CU_ADD_TEST(suite, test_nvme_ns_cmd_flush);
+	CU_ADD_TEST(suite, test_nvme_ns_cmd_dataset_management);
+	CU_ADD_TEST(suite, test_io_flags);
+	CU_ADD_TEST(suite, test_nvme_ns_cmd_write_zeroes);
+	CU_ADD_TEST(suite, test_nvme_ns_cmd_write_uncorrectable);
+	CU_ADD_TEST(suite, test_nvme_ns_cmd_reservation_register);
+	CU_ADD_TEST(suite, test_nvme_ns_cmd_reservation_release);
+	CU_ADD_TEST(suite, test_nvme_ns_cmd_reservation_acquire);
+	CU_ADD_TEST(suite, test_nvme_ns_cmd_reservation_report);
+	CU_ADD_TEST(suite, test_cmd_child_request);
+	CU_ADD_TEST(suite, test_nvme_ns_cmd_readv);
+	CU_ADD_TEST(suite, test_nvme_ns_cmd_read_with_md);
+	CU_ADD_TEST(suite, test_nvme_ns_cmd_writev);
+	CU_ADD_TEST(suite, test_nvme_ns_cmd_write_with_md);
+	CU_ADD_TEST(suite, test_nvme_ns_cmd_comparev);
+	CU_ADD_TEST(suite, test_nvme_ns_cmd_compare_and_write);
+	CU_ADD_TEST(suite, test_nvme_ns_cmd_compare_with_md);
+	CU_ADD_TEST(suite, test_nvme_ns_cmd_comparev_with_md);
+
+	/* Point the library at the test-local driver instance before running */
+	g_spdk_nvme_driver = &_g_nvme_driver;
+
+	CU_basic_set_mode(CU_BRM_VERBOSE);
+	CU_basic_run_tests();
+	num_failures = CU_get_number_of_failures();
+	CU_cleanup_registry();
+	return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/.gitignore
new file mode 100644
index 000000000..8f4f47a17
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/.gitignore
@@ -0,0 +1 @@
+nvme_ns_ocssd_cmd_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/Makefile
new file mode 100644
index 000000000..35fdb83a0
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_ns_ocssd_cmd_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/nvme_ns_ocssd_cmd_ut.c b/src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/nvme_ns_ocssd_cmd_ut.c
new file mode 100644
index 000000000..fa25a4640
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/nvme_ns_ocssd_cmd_ut.c
@@ -0,0 +1,650 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "nvme/nvme_ns_ocssd_cmd.c"
+#include "nvme/nvme_ns_cmd.c"
+#include "nvme/nvme.c"
+
+#include "common/lib/test_env.c"
+
+/* OCSSD sector size (4 KiB) used by every test in this file. */
+#define OCSSD_SECTOR_SIZE 0x1000
+
+/* Minimal driver object installed as g_spdk_nvme_driver in main(). */
+static struct nvme_driver _g_nvme_driver = {
+	.lock = PTHREAD_MUTEX_INITIALIZER,
+};
+
+/* Last request captured by the nvme_qpair_submit_request() stub; inspected by each test. */
+static struct nvme_request *g_request = NULL;
+
+/* Stub: capture the submitted request in g_request instead of issuing it. Always succeeds. */
+int
+nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
+{
+	g_request = req;
+
+	return 0;
+}
+
+/* Stub: no controller teardown is needed in these command-building tests. */
+void
+nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
+{
+}
+
+/* Stub: per-process ref counting is irrelevant here; intentionally empty. */
+void
+nvme_ctrlr_proc_get_ref(struct spdk_nvme_ctrlr *ctrlr)
+{
+}
+
+/* Stub: report successful controller initialization without doing anything. */
+int
+nvme_ctrlr_process_init(struct spdk_nvme_ctrlr *ctrlr)
+{
+	return 0;
+}
+
+/* Stub: per-process ref counting is irrelevant here; intentionally empty. */
+void
+nvme_ctrlr_proc_put_ref(struct spdk_nvme_ctrlr *ctrlr)
+{
+}
+
+/*
+ * Stub: zero the whole opts struct.
+ * NOTE(review): ignores opts_size — assumes callers always pass a full-size
+ * struct, unlike the real implementation; confirm if opts versioning matters.
+ */
+void
+spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
+{
+	memset(opts, 0, sizeof(*opts));
+}
+
+/* Stub: every transport name is reported as available. */
+bool
+spdk_nvme_transport_available_by_name(const char *transport_name)
+{
+	return true;
+}
+
+/* Stub: controller construction is never exercised by these tests; always returns NULL. */
+struct spdk_nvme_ctrlr *nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
+		const struct spdk_nvme_ctrlr_opts *opts,
+		void *devhandle)
+{
+	return NULL;
+}
+
+/* Stub: pretend the controller has no active references. */
+int
+nvme_ctrlr_get_ref_count(struct spdk_nvme_ctrlr *ctrlr)
+{
+	return 0;
+}
+
+/* Stub: transport scan finds nothing and reports success. */
+int
+nvme_transport_ctrlr_scan(struct spdk_nvme_probe_ctx *probe_ctx,
+			  bool direct_connect)
+{
+	return 0;
+}
+
+/* Report the controller's max transfer size as the namespace's per-I/O limit. */
+uint32_t
+spdk_nvme_ns_get_max_io_xfer_size(struct spdk_nvme_ns *ns)
+{
+	return ns->ctrlr->max_xfer_size;
+}
+
+/*
+ * Initialize the fake ns/ctrlr/qpair trio for one test case and build the
+ * qpair's pool of 32 free requests. Clears g_request so the test starts with
+ * no captured submission. Pair each call with cleanup_after_test().
+ */
+static void
+prepare_for_test(struct spdk_nvme_ns *ns, struct spdk_nvme_ctrlr *ctrlr,
+		 struct spdk_nvme_qpair *qpair,
+		 uint32_t sector_size, uint32_t md_size, uint32_t max_xfer_size,
+		 uint32_t stripe_size, bool extended_lba)
+{
+	uint32_t num_requests = 32;
+	uint32_t i;
+
+	ctrlr->max_xfer_size = max_xfer_size;
+	/*
+	 * Clear the flags field - we especially want to make sure the SGL_SUPPORTED flag is not set
+	 * so that we test the SGL splitting path.
+	 */
+	ctrlr->flags = 0;
+	ctrlr->min_page_size = 4096;
+	ctrlr->page_size = 4096;
+	memset(&ctrlr->opts, 0, sizeof(ctrlr->opts));
+	memset(ns, 0, sizeof(*ns));
+	ns->ctrlr = ctrlr;
+	ns->sector_size = sector_size;
+	ns->extended_lba_size = sector_size;
+	if (extended_lba) {
+		ns->flags |= SPDK_NVME_NS_EXTENDED_LBA_SUPPORTED;
+		ns->extended_lba_size += md_size;
+	}
+	ns->md_size = md_size;
+	ns->sectors_per_max_io = spdk_nvme_ns_get_max_io_xfer_size(ns) / ns->extended_lba_size;
+	ns->sectors_per_stripe = stripe_size / ns->extended_lba_size;
+
+	memset(qpair, 0, sizeof(*qpair));
+	qpair->ctrlr = ctrlr;
+	qpair->req_buf = calloc(num_requests, sizeof(struct nvme_request));
+	SPDK_CU_ASSERT_FATAL(qpair->req_buf != NULL);
+
+	for (i = 0; i < num_requests; i++) {
+		struct nvme_request *req = qpair->req_buf + i * sizeof(struct nvme_request);
+
+		req->qpair = qpair;
+		STAILQ_INSERT_HEAD(&qpair->free_req, req, stailq);
+	}
+
+	g_request = NULL;
+}
+
+/* Release the request pool allocated by prepare_for_test(). */
+static void
+cleanup_after_test(struct spdk_nvme_qpair *qpair)
+{
+	free(qpair->req_buf);
+}
+
+/*
+ * A one-entry vector reset must place the single LBA directly in cdw10
+ * (no LBA-list buffer) and encode a zero-based vector count of 0 in cdw12.
+ */
+static void
+test_nvme_ocssd_ns_cmd_vector_reset_single_entry(void)
+{
+	const uint32_t max_xfer_size = 0x10000;
+	const uint32_t sector_size = OCSSD_SECTOR_SIZE;
+
+	struct spdk_nvme_ns ns;
+	struct spdk_nvme_ctrlr ctrlr;
+	struct spdk_nvme_qpair qpair;
+
+	int rc = 0;
+
+	prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_xfer_size, 0, false);
+	uint64_t lba_list = 0x12345678;
+	/* Fix: capture the return code so the rc assertion below actually checks it. */
+	rc = spdk_nvme_ocssd_ns_cmd_vector_reset(&ns, &qpair, &lba_list, 1,
+			NULL, NULL, NULL);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+	CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_RESET);
+	CU_ASSERT(g_request->cmd.nsid == ns.id);
+	CU_ASSERT(g_request->cmd.cdw10 == lba_list);
+	CU_ASSERT(g_request->cmd.cdw12 == 0);
+
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+}
+
+/*
+ * A multi-entry vector reset must encode the zero-based entry count
+ * (vector_size - 1) in cdw12.
+ */
+static void
+test_nvme_ocssd_ns_cmd_vector_reset(void)
+{
+	const uint32_t max_xfer_size = 0x10000;
+	const uint32_t sector_size = OCSSD_SECTOR_SIZE;
+	const uint32_t vector_size = 0x10;
+
+	struct spdk_nvme_ns ns;
+	struct spdk_nvme_ctrlr ctrlr;
+	struct spdk_nvme_qpair qpair;
+
+	int rc = 0;
+
+	prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_xfer_size, 0, false);
+	uint64_t lba_list[vector_size];
+	/* Fix: capture the return code so the rc assertion below actually checks it. */
+	rc = spdk_nvme_ocssd_ns_cmd_vector_reset(&ns, &qpair, lba_list, vector_size,
+			NULL, NULL, NULL);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+	CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_RESET);
+	CU_ASSERT(g_request->cmd.nsid == ns.id);
+	CU_ASSERT(g_request->cmd.cdw12 == vector_size - 1);
+
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+}
+
+/*
+ * Single-entry vector read with metadata: the request must carry the metadata
+ * pointer, a one-sector payload, the LBA in cdw10, and a zero count in cdw12.
+ */
+static void
+test_nvme_ocssd_ns_cmd_vector_read_with_md_single_entry(void)
+{
+	const uint32_t max_xfer_size = 0x10000;
+	const uint32_t sector_size = OCSSD_SECTOR_SIZE;
+	const uint32_t md_size = 0x80;
+
+	struct spdk_nvme_ns ns;
+	struct spdk_nvme_ctrlr ctrlr;
+	struct spdk_nvme_qpair qpair;
+
+	int rc = 0;
+
+	char *buffer = malloc(sector_size);
+	char *metadata = malloc(md_size);
+	uint64_t lba_list = 0x12345678;
+
+	SPDK_CU_ASSERT_FATAL(buffer != NULL);
+	SPDK_CU_ASSERT_FATAL(metadata != NULL);
+
+	prepare_for_test(&ns, &ctrlr, &qpair, sector_size, md_size, max_xfer_size, 0, false);
+	rc = spdk_nvme_ocssd_ns_cmd_vector_read_with_md(&ns, &qpair, buffer, metadata,
+			&lba_list, 1, NULL, NULL, 0);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+	CU_ASSERT(g_request->payload.md == metadata);
+	CU_ASSERT(g_request->payload_size == OCSSD_SECTOR_SIZE);
+	CU_ASSERT(g_request->payload.contig_or_cb_arg == buffer);
+	CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_READ);
+	CU_ASSERT(g_request->cmd.nsid == ns.id);
+	CU_ASSERT(g_request->cmd.cdw10 == lba_list);
+	CU_ASSERT(g_request->cmd.cdw12 == 0);
+
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+
+	free(buffer);
+	free(metadata);
+}
+
+/*
+ * Multi-entry vector read with metadata: 16 sectors of 4K exactly fill
+ * max_xfer_size, so the request is not split (num_children == 0).
+ */
+static void
+test_nvme_ocssd_ns_cmd_vector_read_with_md(void)
+{
+	const uint32_t max_xfer_size = 0x10000;
+	const uint32_t sector_size = OCSSD_SECTOR_SIZE;
+	const uint32_t md_size = 0x80;
+	const uint32_t vector_size = 0x10;
+
+	struct spdk_nvme_ns ns;
+	struct spdk_nvme_ctrlr ctrlr;
+	struct spdk_nvme_qpair qpair;
+
+	int rc = 0;
+
+	char *buffer = malloc(sector_size * vector_size);
+	char *metadata = malloc(md_size * vector_size);
+	uint64_t lba_list[vector_size];
+
+	SPDK_CU_ASSERT_FATAL(buffer != NULL);
+	SPDK_CU_ASSERT_FATAL(metadata != NULL);
+
+	prepare_for_test(&ns, &ctrlr, &qpair, sector_size, md_size, max_xfer_size, 0, false);
+	rc = spdk_nvme_ocssd_ns_cmd_vector_read_with_md(&ns, &qpair, buffer, metadata,
+			lba_list, vector_size,
+			NULL, NULL, 0);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+	CU_ASSERT(g_request->payload.md == metadata);
+	CU_ASSERT(g_request->payload_size == max_xfer_size);
+	CU_ASSERT(g_request->payload.contig_or_cb_arg == buffer);
+	CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_READ);
+	CU_ASSERT(g_request->cmd.nsid == ns.id);
+	CU_ASSERT(g_request->cmd.cdw12 == vector_size - 1);
+
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+
+	free(buffer);
+	free(metadata);
+}
+
+/* Single-entry vector read (no metadata): LBA goes in cdw10, count 0 in cdw12. */
+static void
+test_nvme_ocssd_ns_cmd_vector_read_single_entry(void)
+{
+	const uint32_t max_xfer_size = 0x10000;
+	const uint32_t sector_size = OCSSD_SECTOR_SIZE;
+
+	struct spdk_nvme_ns ns;
+	struct spdk_nvme_ctrlr ctrlr;
+	struct spdk_nvme_qpair qpair;
+
+	int rc = 0;
+
+	char *buffer = malloc(sector_size);
+	uint64_t lba_list = 0x12345678;
+
+	SPDK_CU_ASSERT_FATAL(buffer != NULL);
+
+	prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_xfer_size, 0, false);
+	rc = spdk_nvme_ocssd_ns_cmd_vector_read(&ns, &qpair, buffer, &lba_list, 1,
+			NULL, NULL, 0);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+	CU_ASSERT(g_request->payload_size == OCSSD_SECTOR_SIZE);
+	CU_ASSERT(g_request->payload.contig_or_cb_arg == buffer);
+	CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_READ);
+	CU_ASSERT(g_request->cmd.nsid == ns.id);
+	CU_ASSERT(g_request->cmd.cdw10 == lba_list);
+	CU_ASSERT(g_request->cmd.cdw12 == 0);
+
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+	free(buffer);
+}
+
+/* Multi-entry vector read (no metadata): payload fills max_xfer_size, no split. */
+static void
+test_nvme_ocssd_ns_cmd_vector_read(void)
+{
+	const uint32_t max_xfer_size = 0x10000;
+	const uint32_t sector_size = OCSSD_SECTOR_SIZE;
+	const uint32_t vector_size = 0x10;
+
+	struct spdk_nvme_ns ns;
+	struct spdk_nvme_ctrlr ctrlr;
+	struct spdk_nvme_qpair qpair;
+
+	int rc = 0;
+
+	char *buffer = malloc(sector_size * vector_size);
+	uint64_t lba_list[vector_size];
+
+	SPDK_CU_ASSERT_FATAL(buffer != NULL);
+
+	prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_xfer_size, 0, false);
+	rc = spdk_nvme_ocssd_ns_cmd_vector_read(&ns, &qpair, buffer, lba_list, vector_size,
+			NULL, NULL, 0);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+	CU_ASSERT(g_request->payload_size == max_xfer_size);
+	CU_ASSERT(g_request->payload.contig_or_cb_arg == buffer);
+	CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_READ);
+	CU_ASSERT(g_request->cmd.nsid == ns.id);
+	CU_ASSERT(g_request->cmd.cdw12 == vector_size - 1);
+
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+	free(buffer);
+}
+
+/*
+ * Single-entry vector write with metadata: request carries metadata pointer,
+ * one-sector payload, the LBA in cdw10, and a zero count in cdw12.
+ */
+static void
+test_nvme_ocssd_ns_cmd_vector_write_with_md_single_entry(void)
+{
+	const uint32_t max_xfer_size = 0x10000;
+	const uint32_t sector_size = OCSSD_SECTOR_SIZE;
+	const uint32_t md_size = 0x80;
+
+	struct spdk_nvme_ns ns;
+	struct spdk_nvme_ctrlr ctrlr;
+	struct spdk_nvme_qpair qpair;
+
+	int rc = 0;
+
+	char *buffer = malloc(sector_size);
+	char *metadata = malloc(md_size);
+	uint64_t lba_list = 0x12345678;
+
+	SPDK_CU_ASSERT_FATAL(buffer != NULL);
+	SPDK_CU_ASSERT_FATAL(metadata != NULL);
+
+	prepare_for_test(&ns, &ctrlr, &qpair, sector_size, md_size, max_xfer_size, 0, false);
+	/* Fix: capture the return code so the rc assertion below actually checks it. */
+	rc = spdk_nvme_ocssd_ns_cmd_vector_write_with_md(&ns, &qpair, buffer, metadata,
+			&lba_list, 1, NULL, NULL, 0);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+	CU_ASSERT(g_request->payload.md == metadata);
+	CU_ASSERT(g_request->payload_size == OCSSD_SECTOR_SIZE);
+	CU_ASSERT(g_request->payload.contig_or_cb_arg == buffer);
+	CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_WRITE);
+	CU_ASSERT(g_request->cmd.nsid == ns.id);
+	CU_ASSERT(g_request->cmd.cdw10 == lba_list);
+	CU_ASSERT(g_request->cmd.cdw12 == 0);
+
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+
+	free(buffer);
+	free(metadata);
+}
+
+
+/*
+ * Multi-entry vector write with metadata: payload fills max_xfer_size exactly,
+ * so the request is not split; cdw12 carries the zero-based entry count.
+ */
+static void
+test_nvme_ocssd_ns_cmd_vector_write_with_md(void)
+{
+	const uint32_t max_xfer_size = 0x10000;
+	const uint32_t sector_size = OCSSD_SECTOR_SIZE;
+	const uint32_t md_size = 0x80;
+	const uint32_t vector_size = 0x10;
+
+	struct spdk_nvme_ns ns;
+	struct spdk_nvme_ctrlr ctrlr;
+	struct spdk_nvme_qpair qpair;
+
+	int rc = 0;
+
+	char *buffer = malloc(sector_size * vector_size);
+	char *metadata = malloc(md_size * vector_size);
+	uint64_t lba_list[vector_size];
+
+	SPDK_CU_ASSERT_FATAL(buffer != NULL);
+	SPDK_CU_ASSERT_FATAL(metadata != NULL);
+
+	prepare_for_test(&ns, &ctrlr, &qpair, sector_size, md_size, max_xfer_size, 0, false);
+	/* Fix: capture the return code so the rc assertion below actually checks it. */
+	rc = spdk_nvme_ocssd_ns_cmd_vector_write_with_md(&ns, &qpair, buffer, metadata,
+			lba_list, vector_size,
+			NULL, NULL, 0);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+	CU_ASSERT(g_request->payload.md == metadata);
+	CU_ASSERT(g_request->payload_size == max_xfer_size);
+	CU_ASSERT(g_request->payload.contig_or_cb_arg == buffer);
+	CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_WRITE);
+	CU_ASSERT(g_request->cmd.nsid == ns.id);
+	CU_ASSERT(g_request->cmd.cdw12 == vector_size - 1);
+
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+
+	free(buffer);
+	free(metadata);
+}
+
+/* Single-entry vector write (no metadata): LBA in cdw10, zero count in cdw12. */
+static void
+test_nvme_ocssd_ns_cmd_vector_write_single_entry(void)
+{
+	const uint32_t max_xfer_size = 0x10000;
+	const uint32_t sector_size = OCSSD_SECTOR_SIZE;
+
+	struct spdk_nvme_ns ns;
+	struct spdk_nvme_ctrlr ctrlr;
+	struct spdk_nvme_qpair qpair;
+
+	int rc = 0;
+
+	char *buffer = malloc(sector_size);
+	uint64_t lba_list = 0x12345678;
+
+	SPDK_CU_ASSERT_FATAL(buffer != NULL);
+
+	prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_xfer_size, 0, false);
+	/* Fix: capture the return code so the rc assertion below actually checks it. */
+	rc = spdk_nvme_ocssd_ns_cmd_vector_write(&ns, &qpair, buffer,
+			&lba_list, 1, NULL, NULL, 0);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+	CU_ASSERT(g_request->payload_size == OCSSD_SECTOR_SIZE);
+	CU_ASSERT(g_request->payload.contig_or_cb_arg == buffer);
+	CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_WRITE);
+	CU_ASSERT(g_request->cmd.nsid == ns.id);
+	CU_ASSERT(g_request->cmd.cdw10 == lba_list);
+	CU_ASSERT(g_request->cmd.cdw12 == 0);
+
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+
+	free(buffer);
+}
+
+/* Multi-entry vector write (no metadata): full-size payload, count in cdw12. */
+static void
+test_nvme_ocssd_ns_cmd_vector_write(void)
+{
+	const uint32_t max_xfer_size = 0x10000;
+	const uint32_t sector_size = OCSSD_SECTOR_SIZE;
+	const uint32_t vector_size = 0x10;
+
+	struct spdk_nvme_ns ns;
+	struct spdk_nvme_ctrlr ctrlr;
+	struct spdk_nvme_qpair qpair;
+
+	int rc = 0;
+
+	char *buffer = malloc(sector_size * vector_size);
+	uint64_t lba_list[vector_size];
+
+	SPDK_CU_ASSERT_FATAL(buffer != NULL);
+
+	prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_xfer_size, 0, false);
+	/* Fix: capture the return code so the rc assertion below actually checks it. */
+	rc = spdk_nvme_ocssd_ns_cmd_vector_write(&ns, &qpair, buffer,
+			lba_list, vector_size,
+			NULL, NULL, 0);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+	CU_ASSERT(g_request->payload_size == max_xfer_size);
+	CU_ASSERT(g_request->payload.contig_or_cb_arg == buffer);
+	CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_WRITE);
+	CU_ASSERT(g_request->cmd.nsid == ns.id);
+	CU_ASSERT(g_request->cmd.cdw12 == vector_size - 1);
+
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+
+	free(buffer);
+}
+
+/*
+ * Single-entry vector copy: source LBA in cdw10, destination LBA in cdw14,
+ * zero-based count of 0 in cdw12.
+ */
+static void
+test_nvme_ocssd_ns_cmd_vector_copy_single_entry(void)
+{
+	const uint32_t max_xfer_size = 0x10000;
+	const uint32_t sector_size = OCSSD_SECTOR_SIZE;
+
+	struct spdk_nvme_ns ns;
+	struct spdk_nvme_ctrlr ctrlr;
+	struct spdk_nvme_qpair qpair;
+
+	int rc = 0;
+
+	uint64_t src_lba_list = 0x12345678;
+	uint64_t dst_lba_list = 0x87654321;
+
+	prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_xfer_size, 0, false);
+	/* Fix: capture the return code so the rc assertion below actually checks it. */
+	rc = spdk_nvme_ocssd_ns_cmd_vector_copy(&ns, &qpair, &dst_lba_list, &src_lba_list, 1,
+			NULL, NULL, 0);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+	CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_COPY);
+	CU_ASSERT(g_request->cmd.nsid == ns.id);
+	CU_ASSERT(g_request->cmd.cdw10 == src_lba_list);
+	CU_ASSERT(g_request->cmd.cdw12 == 0);
+	CU_ASSERT(g_request->cmd.cdw14 == dst_lba_list);
+
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+}
+
+/* Multi-entry vector copy: cdw12 carries the zero-based entry count. */
+static void
+test_nvme_ocssd_ns_cmd_vector_copy(void)
+{
+	const uint32_t max_xfer_size = 0x10000;
+	const uint32_t sector_size = OCSSD_SECTOR_SIZE;
+	const uint32_t vector_size = 0x10;
+
+	struct spdk_nvme_ns ns;
+	struct spdk_nvme_ctrlr ctrlr;
+	struct spdk_nvme_qpair qpair;
+
+	int rc = 0;
+
+	uint64_t src_lba_list[vector_size];
+	uint64_t dst_lba_list[vector_size];
+
+	prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_xfer_size, 0, false);
+	/* Fix: capture the return code so the rc assertion below actually checks it. */
+	rc = spdk_nvme_ocssd_ns_cmd_vector_copy(&ns, &qpair,
+			dst_lba_list, src_lba_list, vector_size,
+			NULL, NULL, 0);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+	CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_COPY);
+	CU_ASSERT(g_request->cmd.nsid == ns.id);
+	CU_ASSERT(g_request->cmd.cdw12 == vector_size - 1);
+
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+}
+
+/*
+ * Register the OCSSD vector-command suite with CUnit and run it.
+ * Installs the local fake driver before running; the exit code is the
+ * number of failed assertions.
+ */
+int main(int argc, char **argv)
+{
+	CU_pSuite	suite = NULL;
+	unsigned int	num_failures;
+
+	CU_set_error_action(CUEA_ABORT);
+	CU_initialize_registry();
+
+	suite = CU_add_suite("nvme_ns_cmd", NULL, NULL);
+
+	CU_ADD_TEST(suite, test_nvme_ocssd_ns_cmd_vector_reset);
+	CU_ADD_TEST(suite, test_nvme_ocssd_ns_cmd_vector_reset_single_entry);
+	CU_ADD_TEST(suite, test_nvme_ocssd_ns_cmd_vector_read_with_md);
+	CU_ADD_TEST(suite, test_nvme_ocssd_ns_cmd_vector_read_with_md_single_entry);
+	CU_ADD_TEST(suite, test_nvme_ocssd_ns_cmd_vector_read);
+	CU_ADD_TEST(suite, test_nvme_ocssd_ns_cmd_vector_read_single_entry);
+	CU_ADD_TEST(suite, test_nvme_ocssd_ns_cmd_vector_write_with_md);
+	CU_ADD_TEST(suite, test_nvme_ocssd_ns_cmd_vector_write_with_md_single_entry);
+	CU_ADD_TEST(suite, test_nvme_ocssd_ns_cmd_vector_write);
+	CU_ADD_TEST(suite, test_nvme_ocssd_ns_cmd_vector_write_single_entry);
+	CU_ADD_TEST(suite, test_nvme_ocssd_ns_cmd_vector_copy);
+	CU_ADD_TEST(suite, test_nvme_ocssd_ns_cmd_vector_copy_single_entry);
+
+	/* The command builders dereference the global driver; install the fake one. */
+	g_spdk_nvme_driver = &_g_nvme_driver;
+
+	CU_basic_set_mode(CU_BRM_VERBOSE);
+	CU_basic_run_tests();
+	num_failures = CU_get_number_of_failures();
+	CU_cleanup_registry();
+	return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_pcie.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_pcie.c/.gitignore
new file mode 100644
index 000000000..8fc291095
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_pcie.c/.gitignore
@@ -0,0 +1 @@
+nvme_pcie_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_pcie.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_pcie.c/Makefile
new file mode 100644
index 000000000..09032a935
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_pcie.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_pcie_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_pcie.c/nvme_pcie_ut.c b/src/spdk/test/unit/lib/nvme/nvme_pcie.c/nvme_pcie_ut.c
new file mode 100644
index 000000000..ccc59b4da
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_pcie.c/nvme_pcie_ut.c
@@ -0,0 +1,498 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#define UNIT_TEST_NO_VTOPHYS
+
+#include "nvme/nvme_pcie.c"
+#include "common/lib/nvme/common_stubs.h"
+
+pid_t g_spdk_nvme_pid;
+
+/* Memory registration and PCI helpers stubbed with benign defaults:
+ * nvme_pcie.c is compiled in isolation for this unit test. */
+DEFINE_STUB(spdk_mem_register, int, (void *vaddr, size_t len), 0);
+DEFINE_STUB(spdk_mem_unregister, int, (void *vaddr, size_t len), 0);
+
+DEFINE_STUB(nvme_get_quirks, uint64_t, (const struct spdk_pci_id *id), 0);
+
+DEFINE_STUB(nvme_wait_for_completion, int,
+	    (struct spdk_nvme_qpair *qpair,
+	     struct nvme_completion_poll_status *status), 0);
+DEFINE_STUB_V(nvme_completion_poll_cb, (void *arg, const struct spdk_nvme_cpl *cpl));
+
+DEFINE_STUB(nvme_ctrlr_submit_admin_request, int, (struct spdk_nvme_ctrlr *ctrlr,
+		struct nvme_request *req), 0);
+DEFINE_STUB_V(nvme_ctrlr_free_processes, (struct spdk_nvme_ctrlr *ctrlr));
+DEFINE_STUB(nvme_ctrlr_proc_get_devhandle, struct spdk_pci_device *,
+	    (struct spdk_nvme_ctrlr *ctrlr), NULL);
+
+DEFINE_STUB(spdk_pci_device_map_bar, int, (struct spdk_pci_device *dev, uint32_t bar,
+		void **mapped_addr, uint64_t *phys_addr, uint64_t *size), 0);
+DEFINE_STUB(spdk_pci_device_unmap_bar, int, (struct spdk_pci_device *dev, uint32_t bar, void *addr),
+	    0);
+DEFINE_STUB(spdk_pci_device_attach, int, (struct spdk_pci_driver *driver, spdk_pci_enum_cb enum_cb,
+		void *enum_ctx, struct spdk_pci_addr *pci_address), 0);
+DEFINE_STUB(spdk_pci_device_claim, int, (struct spdk_pci_device *dev), 0);
+DEFINE_STUB_V(spdk_pci_device_unclaim, (struct spdk_pci_device *dev));
+DEFINE_STUB_V(spdk_pci_device_detach, (struct spdk_pci_device *device));
+DEFINE_STUB(spdk_pci_device_cfg_write16, int, (struct spdk_pci_device *dev, uint16_t value,
+		uint32_t offset), 0);
+DEFINE_STUB(spdk_pci_device_cfg_read16, int, (struct spdk_pci_device *dev, uint16_t *value,
+		uint32_t offset), 0);
+DEFINE_STUB(spdk_pci_device_get_id, struct spdk_pci_id, (struct spdk_pci_device *dev), {0})
+
+DEFINE_STUB(nvme_uevent_connect, int, (void), 0);
+
+/* Log flag referenced by the code under test; logging disabled for the run. */
+struct spdk_log_flag SPDK_LOG_NVME = {
+	.name = "nvme",
+	.enabled = false,
+};
+
+/* Installed per-test; some cases point this at a local fake driver. */
+struct nvme_driver *g_spdk_nvme_driver = NULL;
+
+/* Set by the spdk_pci_enumerate() stub so tests can verify a scan happened. */
+bool g_device_is_enumerated = false;
+
+/* Stub: record failure (and removal, when hot_remove) flags instead of real teardown. */
+void
+nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove)
+{
+	CU_ASSERT(ctrlr != NULL);
+	if (hot_remove) {
+		ctrlr->is_removed = true;
+	}
+
+	ctrlr->is_failed = true;
+}
+
+/* Queue entry wrapping one fake uevent so tests can feed nvme_get_uevent(). */
+struct spdk_uevent_entry {
+	struct spdk_uevent uevent;
+	STAILQ_ENTRY(spdk_uevent_entry) link;
+};
+
+/* FIFO of queued fake uevents consumed by the nvme_get_uevent() stub. */
+static STAILQ_HEAD(, spdk_uevent_entry) g_uevents = STAILQ_HEAD_INITIALIZER(g_uevents);
+
+/*
+ * Stub: pop the next queued fake uevent from g_uevents.
+ * Returns 1 when an event was copied into *uevent, 0 when the queue is empty.
+ * NOTE(review): the dequeued entry is not freed here — entries are expected to
+ * be stack/struct-owned by the test that queued them.
+ */
+int
+nvme_get_uevent(int fd, struct spdk_uevent *uevent)
+{
+	struct spdk_uevent_entry *entry;
+
+	if (STAILQ_EMPTY(&g_uevents)) {
+		return 0;
+	}
+
+	entry = STAILQ_FIRST(&g_uevents);
+	STAILQ_REMOVE_HEAD(&g_uevents, link);
+
+	*uevent = entry->uevent;
+
+	return 1;
+}
+
+/* Stub: record that enumeration was requested; no devices are reported. */
+int
+spdk_pci_enumerate(struct spdk_pci_driver *driver, spdk_pci_enum_cb enum_cb, void *enum_ctx)
+{
+	g_device_is_enumerated = true;
+
+	return 0;
+}
+
+/* Mapped-length value reported by the spdk_vtophys() mock below. */
+static uint64_t g_vtophys_size = 0;
+
+/*
+ * Identity virtual-to-physical translation mock. Reports g_vtophys_size as
+ * the contiguous length; MOCK_SET(spdk_vtophys, ...) overrides the return
+ * value (e.g. SPDK_VTOPHYS_ERROR to simulate translation failure).
+ */
+DEFINE_RETURN_MOCK(spdk_vtophys, uint64_t);
+uint64_t
+spdk_vtophys(void *buf, uint64_t *size)
+{
+	if (size) {
+		*size = g_vtophys_size;
+	}
+
+	HANDLE_RETURN_MOCK(spdk_vtophys);
+
+	return (uintptr_t)buf;
+}
+
+/* Remaining controller/qpair helpers stubbed with benign defaults. */
+DEFINE_STUB(spdk_pci_device_get_addr, struct spdk_pci_addr, (struct spdk_pci_device *dev), {});
+DEFINE_STUB(nvme_ctrlr_probe, int, (const struct spdk_nvme_transport_id *trid,
+				    struct spdk_nvme_probe_ctx *probe_ctx, void *devhandle), 0);
+DEFINE_STUB(spdk_pci_device_is_removed, bool, (struct spdk_pci_device *dev), false);
+DEFINE_STUB(nvme_get_ctrlr_by_trid_unsafe, struct spdk_nvme_ctrlr *,
+	    (const struct spdk_nvme_transport_id *trid), NULL);
+DEFINE_STUB(spdk_nvme_ctrlr_get_regs_csts, union spdk_nvme_csts_register,
+	    (struct spdk_nvme_ctrlr *ctrlr), {});
+DEFINE_STUB(nvme_ctrlr_get_process, struct spdk_nvme_ctrlr_process *,
+	    (struct spdk_nvme_ctrlr *ctrlr, pid_t pid), NULL);
+DEFINE_STUB(nvme_completion_is_retry, bool, (const struct spdk_nvme_cpl *cpl), false);
+DEFINE_STUB_V(spdk_nvme_qpair_print_command, (struct spdk_nvme_qpair *qpair,
+		struct spdk_nvme_cmd *cmd));
+DEFINE_STUB_V(spdk_nvme_qpair_print_completion, (struct spdk_nvme_qpair *qpair,
+		struct spdk_nvme_cpl *cpl));
+
+/*
+ * Reset tracker/request state before each PRP sub-case: zero both structs,
+ * link the tracker to the request, give the PRP list a recognizable bus
+ * address (0xDEADBEEF) so assertions can check prp2, and zero the index.
+ */
+static void
+prp_list_prep(struct nvme_tracker *tr, struct nvme_request *req, uint32_t *prp_index)
+{
+	memset(req, 0, sizeof(*req));
+	memset(tr, 0, sizeof(*tr));
+	tr->req = req;
+	tr->prp_sgl_bus_addr = 0xDEADBEEF;
+	*prp_index = 0;
+}
+
+/*
+ * Exercise nvme_pcie_prp_list_append() across aligned and unaligned buffers,
+ * multi-entry PRP lists, vtophys failure, and list-capacity limits.
+ * PRP rules exercised here: prp1 may be unaligned; every subsequent entry
+ * must start on a page boundary; DWORD-unaligned buffers are rejected.
+ * The spdk_vtophys() mock above makes physical == virtual addresses.
+ */
+static void
+test_prp_list_append(void)
+{
+	struct nvme_request req;
+	struct nvme_tracker tr;
+	uint32_t prp_index;
+
+	/* Non-DWORD-aligned buffer (invalid) */
+	prp_list_prep(&tr, &req, &prp_index);
+	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100001, 0x1000, 0x1000) == -EFAULT);
+
+	/* 512-byte buffer, 4K aligned */
+	prp_list_prep(&tr, &req, &prp_index);
+	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000, 0x200, 0x1000) == 0);
+	CU_ASSERT(prp_index == 1);
+	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
+
+	/* 512-byte buffer, non-4K-aligned */
+	prp_list_prep(&tr, &req, &prp_index);
+	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x108000, 0x200, 0x1000) == 0);
+	CU_ASSERT(prp_index == 1);
+	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x108000);
+
+	/* 4K buffer, 4K aligned */
+	prp_list_prep(&tr, &req, &prp_index);
+	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000, 0x1000, 0x1000) == 0);
+	CU_ASSERT(prp_index == 1);
+	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
+
+	/* 4K buffer, non-4K aligned */
+	prp_list_prep(&tr, &req, &prp_index);
+	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800, 0x1000, 0x1000) == 0);
+	CU_ASSERT(prp_index == 2);
+	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
+	CU_ASSERT(req.cmd.dptr.prp.prp2 == 0x101000);
+
+	/* 8K buffer, 4K aligned */
+	prp_list_prep(&tr, &req, &prp_index);
+	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000, 0x2000, 0x1000) == 0);
+	CU_ASSERT(prp_index == 2);
+	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
+	CU_ASSERT(req.cmd.dptr.prp.prp2 == 0x101000);
+
+	/* 8K buffer, non-4K aligned */
+	prp_list_prep(&tr, &req, &prp_index);
+	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800, 0x2000, 0x1000) == 0);
+	CU_ASSERT(prp_index == 3);
+	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
+	CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
+	CU_ASSERT(tr.u.prp[0] == 0x101000);
+	CU_ASSERT(tr.u.prp[1] == 0x102000);
+
+	/* 12K buffer, 4K aligned */
+	prp_list_prep(&tr, &req, &prp_index);
+	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000, 0x3000, 0x1000) == 0);
+	CU_ASSERT(prp_index == 3);
+	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
+	CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
+	CU_ASSERT(tr.u.prp[0] == 0x101000);
+	CU_ASSERT(tr.u.prp[1] == 0x102000);
+
+	/* 12K buffer, non-4K aligned */
+	prp_list_prep(&tr, &req, &prp_index);
+	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800, 0x3000, 0x1000) == 0);
+	CU_ASSERT(prp_index == 4);
+	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
+	CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
+	CU_ASSERT(tr.u.prp[0] == 0x101000);
+	CU_ASSERT(tr.u.prp[1] == 0x102000);
+	CU_ASSERT(tr.u.prp[2] == 0x103000);
+
+	/* Two 4K buffers, both 4K aligned */
+	prp_list_prep(&tr, &req, &prp_index);
+	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000, 0x1000, 0x1000) == 0);
+	CU_ASSERT(prp_index == 1);
+	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x900000, 0x1000, 0x1000) == 0);
+	CU_ASSERT(prp_index == 2);
+	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
+	CU_ASSERT(req.cmd.dptr.prp.prp2 == 0x900000);
+
+	/* Two 4K buffers, first non-4K aligned, second 4K aligned */
+	prp_list_prep(&tr, &req, &prp_index);
+	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800, 0x1000, 0x1000) == 0);
+	CU_ASSERT(prp_index == 2);
+	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x900000, 0x1000, 0x1000) == 0);
+	CU_ASSERT(prp_index == 3);
+	CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
+	CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
+	CU_ASSERT(tr.u.prp[0] == 0x101000);
+	CU_ASSERT(tr.u.prp[1] == 0x900000);
+
+	/* Two 4K buffers, both non-4K aligned (invalid) */
+	prp_list_prep(&tr, &req, &prp_index);
+	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800, 0x1000, 0x1000) == 0);
+	CU_ASSERT(prp_index == 2);
+	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x900800, 0x1000, 0x1000) == -EFAULT);
+	CU_ASSERT(prp_index == 2);
+
+	/* 4K buffer, 4K aligned, but vtophys fails */
+	MOCK_SET(spdk_vtophys, SPDK_VTOPHYS_ERROR);
+	prp_list_prep(&tr, &req, &prp_index);
+	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000, 0x1000, 0x1000) == -EFAULT);
+	MOCK_CLEAR(spdk_vtophys);
+
+	/* Largest aligned buffer that can be described in NVME_MAX_PRP_LIST_ENTRIES (plus PRP1) */
+	prp_list_prep(&tr, &req, &prp_index);
+	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000,
+					    (NVME_MAX_PRP_LIST_ENTRIES + 1) * 0x1000, 0x1000) == 0);
+	CU_ASSERT(prp_index == NVME_MAX_PRP_LIST_ENTRIES + 1);
+
+	/* Largest non-4K-aligned buffer that can be described in NVME_MAX_PRP_LIST_ENTRIES (plus PRP1) */
+	prp_list_prep(&tr, &req, &prp_index);
+	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800,
+					    NVME_MAX_PRP_LIST_ENTRIES * 0x1000, 0x1000) == 0);
+	CU_ASSERT(prp_index == NVME_MAX_PRP_LIST_ENTRIES + 1);
+
+	/* Buffer too large to be described in NVME_MAX_PRP_LIST_ENTRIES */
+	prp_list_prep(&tr, &req, &prp_index);
+	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000,
+					    (NVME_MAX_PRP_LIST_ENTRIES + 2) * 0x1000, 0x1000) == -EFAULT);
+
+	/* Non-4K-aligned buffer too large to be described in NVME_MAX_PRP_LIST_ENTRIES */
+	prp_list_prep(&tr, &req, &prp_index);
+	CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800,
+					    (NVME_MAX_PRP_LIST_ENTRIES + 1) * 0x1000, 0x1000) == -EFAULT);
+}
+
+static void
+test_nvme_pcie_hotplug_monitor(void)
+{
+ struct nvme_pcie_ctrlr pctrlr = {};
+ struct spdk_uevent_entry entry = {};
+ struct nvme_driver driver;
+ pthread_mutexattr_t attr;
+ struct spdk_nvme_probe_ctx test_nvme_probe_ctx = {};
+
+ /* Initiate variables and ctrlr */
+ driver.initialized = true;
+ driver.hotplug_fd = 123;
+ CU_ASSERT(pthread_mutexattr_init(&attr) == 0);
+ CU_ASSERT(pthread_mutex_init(&driver.lock, &attr) == 0);
+ TAILQ_INIT(&driver.shared_attached_ctrlrs);
+ g_spdk_nvme_driver = &driver;
+
+ /* Case 1: SPDK_NVME_UEVENT_ADD/ NVME_VFIO */
+ entry.uevent.subsystem = SPDK_NVME_UEVENT_SUBSYSTEM_VFIO;
+ entry.uevent.action = SPDK_NVME_UEVENT_ADD;
+ snprintf(entry.uevent.traddr, sizeof(entry.uevent.traddr), "0000:05:00.0");
+ CU_ASSERT(STAILQ_EMPTY(&g_uevents));
+ STAILQ_INSERT_TAIL(&g_uevents, &entry, link);
+
+ _nvme_pcie_hotplug_monitor(&test_nvme_probe_ctx);
+
+ CU_ASSERT(STAILQ_EMPTY(&g_uevents));
+ CU_ASSERT(g_device_is_enumerated == true);
+ g_device_is_enumerated = false;
+
+ /* Case 2: SPDK_NVME_UEVENT_ADD/ NVME_UIO */
+ entry.uevent.subsystem = SPDK_NVME_UEVENT_SUBSYSTEM_UIO;
+ entry.uevent.action = SPDK_NVME_UEVENT_ADD;
+ snprintf(entry.uevent.traddr, sizeof(entry.uevent.traddr), "0000:05:00.0");
+ CU_ASSERT(STAILQ_EMPTY(&g_uevents));
+ STAILQ_INSERT_TAIL(&g_uevents, &entry, link);
+
+ _nvme_pcie_hotplug_monitor(&test_nvme_probe_ctx);
+
+ CU_ASSERT(STAILQ_EMPTY(&g_uevents));
+ CU_ASSERT(g_device_is_enumerated == true);
+ g_device_is_enumerated = false;
+
+ /* Case 3: SPDK_NVME_UEVENT_REMOVE/ NVME_UIO */
+ entry.uevent.subsystem = SPDK_NVME_UEVENT_SUBSYSTEM_UIO;
+ entry.uevent.action = SPDK_NVME_UEVENT_REMOVE;
+ snprintf(entry.uevent.traddr, sizeof(entry.uevent.traddr), "0000:05:00.0");
+ CU_ASSERT(STAILQ_EMPTY(&g_uevents));
+ STAILQ_INSERT_TAIL(&g_uevents, &entry, link);
+
+ MOCK_SET(nvme_get_ctrlr_by_trid_unsafe, &pctrlr.ctrlr);
+
+ _nvme_pcie_hotplug_monitor(&test_nvme_probe_ctx);
+
+ CU_ASSERT(STAILQ_EMPTY(&g_uevents));
+ CU_ASSERT(pctrlr.ctrlr.is_failed == true);
+ pctrlr.ctrlr.is_failed = false;
+ MOCK_CLEAR(nvme_get_ctrlr_by_trid_unsafe);
+
+ /* Case 4: SPDK_NVME_UEVENT_REMOVE/ NVME_VFIO */
+ entry.uevent.subsystem = SPDK_NVME_UEVENT_SUBSYSTEM_VFIO;
+ entry.uevent.action = SPDK_NVME_UEVENT_REMOVE;
+ snprintf(entry.uevent.traddr, sizeof(entry.uevent.traddr), "0000:05:00.0");
+ CU_ASSERT(STAILQ_EMPTY(&g_uevents));
+ STAILQ_INSERT_TAIL(&g_uevents, &entry, link);
+ MOCK_SET(nvme_get_ctrlr_by_trid_unsafe, &pctrlr.ctrlr);
+
+ _nvme_pcie_hotplug_monitor(&test_nvme_probe_ctx);
+
+ CU_ASSERT(STAILQ_EMPTY(&g_uevents));
+ CU_ASSERT(pctrlr.ctrlr.is_failed == true);
+ pctrlr.ctrlr.is_failed = false;
+ MOCK_CLEAR(nvme_get_ctrlr_by_trid_unsafe);
+
+ /* Case 5: Removed device detected in another process */
+ pctrlr.ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
+ snprintf(pctrlr.ctrlr.trid.traddr, sizeof(pctrlr.ctrlr.trid.traddr), "0000:02:00.0");
+ pctrlr.ctrlr.remove_cb = NULL;
+ pctrlr.ctrlr.is_failed = false;
+ pctrlr.ctrlr.is_removed = false;
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_driver->shared_attached_ctrlrs, &pctrlr.ctrlr, tailq);
+
+ MOCK_SET(spdk_pci_device_is_removed, false);
+
+ _nvme_pcie_hotplug_monitor(&test_nvme_probe_ctx);
+
+ CU_ASSERT(pctrlr.ctrlr.is_failed == false);
+
+ MOCK_SET(spdk_pci_device_is_removed, true);
+
+ _nvme_pcie_hotplug_monitor(&test_nvme_probe_ctx);
+
+ CU_ASSERT(pctrlr.ctrlr.is_failed == true);
+
+ pthread_mutex_destroy(&driver.lock);
+ pthread_mutexattr_destroy(&attr);
+ g_spdk_nvme_driver = NULL;
+}
+
+static void test_shadow_doorbell_update(void)
+{
+ bool ret;
+
+ /* nvme_pcie_qpair_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old) */
+ ret = nvme_pcie_qpair_need_event(10, 15, 14);
+ CU_ASSERT(ret == false);
+
+ ret = nvme_pcie_qpair_need_event(14, 15, 14);
+ CU_ASSERT(ret == true);
+}
+
+static void
+test_build_contig_hw_sgl_request(void)
+{
+ struct spdk_nvme_qpair qpair = {};
+ struct nvme_request req = {};
+ struct nvme_tracker tr = {};
+ int rc;
+
+ /* Test 1: Payload covered by a single mapping */
+ req.payload_size = 100;
+ req.payload = NVME_PAYLOAD_CONTIG(0, 0);
+ g_vtophys_size = 100;
+ MOCK_SET(spdk_vtophys, 0xDEADBEEF);
+
+ rc = nvme_pcie_qpair_build_contig_hw_sgl_request(&qpair, &req, &tr, 0);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
+ CU_ASSERT(req.cmd.dptr.sgl1.address == 0xDEADBEEF);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 100);
+
+ MOCK_CLEAR(spdk_vtophys);
+ g_vtophys_size = 0;
+ memset(&qpair, 0, sizeof(qpair));
+ memset(&req, 0, sizeof(req));
+ memset(&tr, 0, sizeof(tr));
+
+ /* Test 2: Payload covered by a single mapping, but request is at an offset */
+ req.payload_size = 100;
+ req.payload_offset = 50;
+ req.payload = NVME_PAYLOAD_CONTIG(0, 0);
+ g_vtophys_size = 1000;
+ MOCK_SET(spdk_vtophys, 0xDEADBEEF);
+
+ rc = nvme_pcie_qpair_build_contig_hw_sgl_request(&qpair, &req, &tr, 0);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
+ CU_ASSERT(req.cmd.dptr.sgl1.address == 0xDEADBEEF);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 100);
+
+ MOCK_CLEAR(spdk_vtophys);
+ g_vtophys_size = 0;
+ memset(&qpair, 0, sizeof(qpair));
+ memset(&req, 0, sizeof(req));
+ memset(&tr, 0, sizeof(tr));
+
+ /* Test 3: Payload spans two mappings */
+ req.payload_size = 100;
+ req.payload = NVME_PAYLOAD_CONTIG(0, 0);
+ g_vtophys_size = 60;
+ tr.prp_sgl_bus_addr = 0xFF0FF;
+ MOCK_SET(spdk_vtophys, 0xDEADBEEF);
+
+ rc = nvme_pcie_qpair_build_contig_hw_sgl_request(&qpair, &req, &tr, 0);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_LAST_SEGMENT);
+ CU_ASSERT(req.cmd.dptr.sgl1.address == tr.prp_sgl_bus_addr);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 2 * sizeof(struct spdk_nvme_sgl_descriptor));
+ CU_ASSERT(tr.u.sgl[0].unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
+ CU_ASSERT(tr.u.sgl[0].unkeyed.length == 60);
+ CU_ASSERT(tr.u.sgl[0].address == 0xDEADBEEF);
+ CU_ASSERT(tr.u.sgl[1].unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
+ CU_ASSERT(tr.u.sgl[1].unkeyed.length == 40);
+ CU_ASSERT(tr.u.sgl[1].address == 0xDEADBEEF);
+
+ MOCK_CLEAR(spdk_vtophys);
+ g_vtophys_size = 0;
+ memset(&qpair, 0, sizeof(qpair));
+ memset(&req, 0, sizeof(req));
+ memset(&tr, 0, sizeof(tr));
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvme_pcie", NULL, NULL);
+ CU_ADD_TEST(suite, test_prp_list_append);
+ CU_ADD_TEST(suite, test_nvme_pcie_hotplug_monitor);
+ CU_ADD_TEST(suite, test_shadow_doorbell_update);
+ CU_ADD_TEST(suite, test_build_contig_hw_sgl_request);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_poll_group.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_poll_group.c/.gitignore
new file mode 100644
index 000000000..e4223e112
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_poll_group.c/.gitignore
@@ -0,0 +1 @@
+nvme_poll_group_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_poll_group.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_poll_group.c/Makefile
new file mode 100644
index 000000000..4715b5449
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_poll_group.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_poll_group_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_poll_group.c/nvme_poll_group_ut.c b/src/spdk/test/unit/lib/nvme/nvme_poll_group.c/nvme_poll_group_ut.c
new file mode 100644
index 000000000..1503a49c5
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_poll_group.c/nvme_poll_group_ut.c
@@ -0,0 +1,484 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "nvme/nvme_poll_group.c"
+#include "common/lib/test_env.c"
+
+struct spdk_nvme_transport {
+ const char name[32];
+ TAILQ_ENTRY(spdk_nvme_transport) link;
+};
+
+struct spdk_nvme_transport t1 = {
+ .name = "transport1",
+};
+
+struct spdk_nvme_transport t2 = {
+ .name = "transport2",
+};
+
+struct spdk_nvme_transport t3 = {
+ .name = "transport3",
+};
+
+struct spdk_nvme_transport t4 = {
+ .name = "transport4",
+};
+
+int64_t g_process_completions_return_value = 0;
+int g_destroy_return_value = 0;
+
+TAILQ_HEAD(nvme_transport_list, spdk_nvme_transport) g_spdk_nvme_transports =
+ TAILQ_HEAD_INITIALIZER(g_spdk_nvme_transports);
+
+static void
+unit_test_disconnected_qpair_cb(struct spdk_nvme_qpair *qpair, void *poll_group_ctx)
+{
+
+}
+
+const struct spdk_nvme_transport *
+nvme_get_first_transport(void)
+{
+ return TAILQ_FIRST(&g_spdk_nvme_transports);
+}
+
+const struct spdk_nvme_transport *
+nvme_get_next_transport(const struct spdk_nvme_transport *transport)
+{
+ return TAILQ_NEXT(transport, link);
+}
+
+int
+nvme_transport_poll_group_disconnect_qpair(struct spdk_nvme_qpair *qpair)
+{
+ struct spdk_nvme_transport_poll_group *tgroup;
+ struct spdk_nvme_qpair *iter_qp, *tmp_iter_qp;
+
+ tgroup = qpair->poll_group;
+
+ STAILQ_FOREACH_SAFE(iter_qp, &tgroup->connected_qpairs, poll_group_stailq, tmp_iter_qp) {
+ if (qpair == iter_qp) {
+ STAILQ_REMOVE(&tgroup->connected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
+ STAILQ_INSERT_TAIL(&tgroup->disconnected_qpairs, qpair, poll_group_stailq);
+ return 0;
+ }
+ }
+
+ STAILQ_FOREACH(iter_qp, &tgroup->disconnected_qpairs, poll_group_stailq) {
+ if (qpair == iter_qp) {
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+int
+nvme_transport_poll_group_connect_qpair(struct spdk_nvme_qpair *qpair)
+{
+ struct spdk_nvme_transport_poll_group *tgroup;
+ struct spdk_nvme_qpair *iter_qp, *tmp_iter_qp;
+
+ tgroup = qpair->poll_group;
+
+ STAILQ_FOREACH_SAFE(iter_qp, &tgroup->disconnected_qpairs, poll_group_stailq, tmp_iter_qp) {
+ if (qpair == iter_qp) {
+ STAILQ_REMOVE(&tgroup->disconnected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
+ STAILQ_INSERT_TAIL(&tgroup->connected_qpairs, qpair, poll_group_stailq);
+ return 0;
+ }
+ }
+
+ STAILQ_FOREACH(iter_qp, &tgroup->connected_qpairs, poll_group_stailq) {
+ if (qpair == iter_qp) {
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+struct spdk_nvme_transport_poll_group *
+nvme_transport_poll_group_create(const struct spdk_nvme_transport *transport)
+{
+ struct spdk_nvme_transport_poll_group *group = NULL;
+
+ /* TODO: separate this transport function table from the transport specific one. */
+ group = calloc(1, sizeof(*group));
+ if (group) {
+ group->transport = transport;
+ STAILQ_INIT(&group->connected_qpairs);
+ STAILQ_INIT(&group->disconnected_qpairs);
+ }
+
+ return group;
+}
+
+int
+nvme_transport_poll_group_destroy(struct spdk_nvme_transport_poll_group *tgroup)
+{
+ return g_destroy_return_value;
+}
+
+int
+nvme_transport_poll_group_add(struct spdk_nvme_transport_poll_group *tgroup,
+ struct spdk_nvme_qpair *qpair)
+{
+ STAILQ_INSERT_TAIL(&tgroup->connected_qpairs, qpair, poll_group_stailq);
+ qpair->poll_group = tgroup;
+
+ return 0;
+}
+
+int
+nvme_transport_poll_group_remove(struct spdk_nvme_transport_poll_group *tgroup,
+ struct spdk_nvme_qpair *qpair)
+{
+ struct spdk_nvme_qpair *iter_qp, *tmp_iter_qp;
+
+ STAILQ_FOREACH_SAFE(iter_qp, &tgroup->connected_qpairs, poll_group_stailq, tmp_iter_qp) {
+ if (qpair == iter_qp) {
+ STAILQ_REMOVE(&tgroup->connected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
+ return 0;
+ }
+ }
+
+ STAILQ_FOREACH_SAFE(iter_qp, &tgroup->disconnected_qpairs, poll_group_stailq, tmp_iter_qp) {
+ if (qpair == iter_qp) {
+ STAILQ_REMOVE(&tgroup->disconnected_qpairs, qpair, spdk_nvme_qpair, poll_group_stailq);
+ return 0;
+ }
+ }
+
+ return -ENODEV;
+}
+
+int64_t
+nvme_transport_poll_group_process_completions(struct spdk_nvme_transport_poll_group *group,
+ uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb)
+{
+ return g_process_completions_return_value;
+}
+
+static void
+test_spdk_nvme_poll_group_create(void)
+{
+ struct spdk_nvme_poll_group *group;
+
+ /* basic case - create a poll group with no internal transport poll groups. */
+ group = spdk_nvme_poll_group_create(NULL);
+
+ SPDK_CU_ASSERT_FATAL(group != NULL);
+ CU_ASSERT(STAILQ_EMPTY(&group->tgroups));
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_poll_group_destroy(group) == 0);
+
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, &t1, link);
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, &t2, link);
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, &t3, link);
+
+ /* advanced case - create a poll group with three internal poll groups. */
+ group = spdk_nvme_poll_group_create(NULL);
+ CU_ASSERT(STAILQ_EMPTY(&group->tgroups));
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_poll_group_destroy(group) == 0);
+
+ /* Failing case - failed to allocate a poll group. */
+ MOCK_SET(calloc, NULL);
+ group = spdk_nvme_poll_group_create(NULL);
+ CU_ASSERT(group == NULL);
+ MOCK_CLEAR(calloc);
+
+ TAILQ_REMOVE(&g_spdk_nvme_transports, &t1, link);
+ TAILQ_REMOVE(&g_spdk_nvme_transports, &t2, link);
+ TAILQ_REMOVE(&g_spdk_nvme_transports, &t3, link);
+}
+
+static void
+test_spdk_nvme_poll_group_add_remove(void)
+{
+ struct spdk_nvme_poll_group *group;
+ struct spdk_nvme_transport_poll_group *tgroup = NULL, *tmp_tgroup, *tgroup_1 = NULL,
+ *tgroup_2 = NULL,
+ *tgroup_4 = NULL;
+ struct spdk_nvme_qpair *qpair;
+ struct spdk_nvme_qpair qpair1_1 = {0};
+ struct spdk_nvme_qpair qpair1_2 = {0};
+ struct spdk_nvme_qpair qpair2_1 = {0};
+ struct spdk_nvme_qpair qpair2_2 = {0};
+ struct spdk_nvme_qpair qpair4_1 = {0};
+ struct spdk_nvme_qpair qpair4_2 = {0};
+ int i = 0;
+
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, &t1, link);
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, &t2, link);
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, &t3, link);
+
+ group = spdk_nvme_poll_group_create(NULL);
+ SPDK_CU_ASSERT_FATAL(group != NULL);
+ CU_ASSERT(STAILQ_EMPTY(&group->tgroups));
+
+ /* Add qpairs to a single transport. */
+ qpair1_1.transport = &t1;
+ qpair1_1.state = NVME_QPAIR_DISCONNECTED;
+ qpair1_2.transport = &t1;
+ qpair1_2.state = NVME_QPAIR_ENABLED;
+ CU_ASSERT(spdk_nvme_poll_group_add(group, &qpair1_1) == 0);
+ CU_ASSERT(spdk_nvme_poll_group_add(group, &qpair1_2) == -EINVAL);
+ STAILQ_FOREACH(tmp_tgroup, &group->tgroups, link) {
+ if (tmp_tgroup->transport == &t1) {
+ tgroup = tmp_tgroup;
+ } else {
+ CU_ASSERT(STAILQ_EMPTY(&tmp_tgroup->connected_qpairs));
+ }
+ i++;
+ }
+ CU_ASSERT(i == 1);
+ SPDK_CU_ASSERT_FATAL(tgroup != NULL);
+ qpair = STAILQ_FIRST(&tgroup->connected_qpairs);
+ SPDK_CU_ASSERT_FATAL(qpair == &qpair1_1);
+ qpair = STAILQ_NEXT(qpair, poll_group_stailq);
+ CU_ASSERT(qpair == NULL);
+
+ /* Add qpairs to a second transport. */
+ qpair2_1.transport = &t2;
+ qpair2_2.transport = &t2;
+ CU_ASSERT(spdk_nvme_poll_group_add(group, &qpair2_1) == 0);
+ CU_ASSERT(spdk_nvme_poll_group_add(group, &qpair2_2) == 0);
+ qpair4_1.transport = &t4;
+ qpair4_2.transport = &t4;
+ /* Add qpairs for a transport that doesn't exist. */
+ CU_ASSERT(spdk_nvme_poll_group_add(group, &qpair4_1) == -ENODEV);
+ CU_ASSERT(spdk_nvme_poll_group_add(group, &qpair4_2) == -ENODEV);
+ i = 0;
+ STAILQ_FOREACH(tmp_tgroup, &group->tgroups, link) {
+ if (tmp_tgroup->transport == &t1) {
+ tgroup_1 = tmp_tgroup;
+ } else if (tmp_tgroup->transport == &t2) {
+ tgroup_2 = tmp_tgroup;
+ } else {
+ CU_ASSERT(STAILQ_EMPTY(&tmp_tgroup->connected_qpairs));
+ }
+ i++;
+ }
+ CU_ASSERT(i == 2);
+ SPDK_CU_ASSERT_FATAL(tgroup_1 != NULL);
+ qpair = STAILQ_FIRST(&tgroup_1->connected_qpairs);
+ SPDK_CU_ASSERT_FATAL(qpair == &qpair1_1);
+ qpair = STAILQ_NEXT(qpair, poll_group_stailq);
+ CU_ASSERT(qpair == NULL);
+ SPDK_CU_ASSERT_FATAL(tgroup_2 != NULL);
+ qpair = STAILQ_FIRST(&tgroup_2->connected_qpairs);
+ SPDK_CU_ASSERT_FATAL(qpair == &qpair2_1);
+ qpair = STAILQ_NEXT(qpair, poll_group_stailq);
+ SPDK_CU_ASSERT_FATAL(qpair == &qpair2_2);
+ qpair = STAILQ_NEXT(qpair, poll_group_stailq);
+ CU_ASSERT(qpair == NULL);
+
+ /* Try removing a qpair that belongs to a transport not in our poll group. */
+ CU_ASSERT(spdk_nvme_poll_group_remove(group, &qpair4_1) == -ENODEV);
+
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, &t4, link);
+ CU_ASSERT(spdk_nvme_poll_group_add(group, &qpair4_1) == 0);
+ CU_ASSERT(spdk_nvme_poll_group_add(group, &qpair4_2) == 0);
+ STAILQ_FOREACH(tmp_tgroup, &group->tgroups, link) {
+ if (tmp_tgroup->transport == &t1) {
+ tgroup_1 = tmp_tgroup;
+ } else if (tmp_tgroup->transport == &t2) {
+ tgroup_2 = tmp_tgroup;
+ } else if (tmp_tgroup->transport == &t4) {
+ tgroup_4 = tmp_tgroup;
+ } else {
+ CU_ASSERT(STAILQ_EMPTY(&tmp_tgroup->connected_qpairs));
+ }
+ }
+ SPDK_CU_ASSERT_FATAL(tgroup_1 != NULL);
+ qpair = STAILQ_FIRST(&tgroup_1->connected_qpairs);
+ SPDK_CU_ASSERT_FATAL(qpair == &qpair1_1);
+ qpair = STAILQ_NEXT(qpair, poll_group_stailq);
+ CU_ASSERT(qpair == NULL);
+ SPDK_CU_ASSERT_FATAL(tgroup_2 != NULL);
+ qpair = STAILQ_FIRST(&tgroup_2->connected_qpairs);
+ SPDK_CU_ASSERT_FATAL(qpair == &qpair2_1);
+ qpair = STAILQ_NEXT(qpair, poll_group_stailq);
+ SPDK_CU_ASSERT_FATAL(qpair == &qpair2_2);
+ qpair = STAILQ_NEXT(qpair, poll_group_stailq);
+ CU_ASSERT(qpair == NULL);
+ SPDK_CU_ASSERT_FATAL(tgroup_4 != NULL);
+ qpair = STAILQ_FIRST(&tgroup_4->connected_qpairs);
+ SPDK_CU_ASSERT_FATAL(qpair == &qpair4_1);
+ qpair = STAILQ_NEXT(qpair, poll_group_stailq);
+ SPDK_CU_ASSERT_FATAL(qpair == &qpair4_2);
+ qpair = STAILQ_NEXT(qpair, poll_group_stailq);
+ CU_ASSERT(qpair == NULL);
+
+ /* remove all qpairs */
+ CU_ASSERT(spdk_nvme_poll_group_remove(group, &qpair1_1) == 0);
+ CU_ASSERT(spdk_nvme_poll_group_remove(group, &qpair2_1) == 0);
+ CU_ASSERT(spdk_nvme_poll_group_remove(group, &qpair2_2) == 0);
+ CU_ASSERT(spdk_nvme_poll_group_remove(group, &qpair4_1) == 0);
+ CU_ASSERT(spdk_nvme_poll_group_remove(group, &qpair4_2) == 0);
+ /* Confirm the fourth transport group was created. */
+ i = 0;
+ STAILQ_FOREACH_SAFE(tgroup, &group->tgroups, link, tmp_tgroup) {
+ CU_ASSERT(STAILQ_EMPTY(&tgroup->connected_qpairs));
+ STAILQ_REMOVE(&group->tgroups, tgroup, spdk_nvme_transport_poll_group, link);
+ free(tgroup);
+ i++;
+ }
+ CU_ASSERT(i == 3);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_poll_group_destroy(group) == 0);
+
+ TAILQ_REMOVE(&g_spdk_nvme_transports, &t1, link);
+ TAILQ_REMOVE(&g_spdk_nvme_transports, &t2, link);
+ TAILQ_REMOVE(&g_spdk_nvme_transports, &t3, link);
+ TAILQ_REMOVE(&g_spdk_nvme_transports, &t4, link);
+}
+
+static void
+test_spdk_nvme_poll_group_process_completions(void)
+{
+ struct spdk_nvme_poll_group *group;
+ struct spdk_nvme_transport_poll_group *tgroup, *tmp_tgroup;
+ struct spdk_nvme_qpair qpair1_1 = {0};
+
+ group = spdk_nvme_poll_group_create(NULL);
+ SPDK_CU_ASSERT_FATAL(group != NULL);
+
+ /* If we don't have any transport poll groups, we shouldn't get any completions. */
+ g_process_completions_return_value = 32;
+ CU_ASSERT(spdk_nvme_poll_group_process_completions(group, 128,
+ unit_test_disconnected_qpair_cb) == 0);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_poll_group_destroy(group) == 0);
+
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, &t1, link);
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, &t2, link);
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, &t3, link);
+
+ /* try it with three transport poll groups. */
+ group = spdk_nvme_poll_group_create(NULL);
+ SPDK_CU_ASSERT_FATAL(group != NULL);
+ qpair1_1.state = NVME_QPAIR_DISCONNECTED;
+ qpair1_1.transport = &t1;
+ CU_ASSERT(spdk_nvme_poll_group_add(group, &qpair1_1) == 0);
+ qpair1_1.state = NVME_QPAIR_ENABLED;
+ CU_ASSERT(nvme_poll_group_connect_qpair(&qpair1_1) == 0);
+ CU_ASSERT(spdk_nvme_poll_group_process_completions(group, 128,
+ unit_test_disconnected_qpair_cb) == 32);
+ CU_ASSERT(spdk_nvme_poll_group_remove(group, &qpair1_1) == 0);
+ STAILQ_FOREACH_SAFE(tgroup, &group->tgroups, link, tmp_tgroup) {
+ CU_ASSERT(STAILQ_EMPTY(&tgroup->connected_qpairs));
+ STAILQ_REMOVE(&group->tgroups, tgroup, spdk_nvme_transport_poll_group, link);
+ free(tgroup);
+ }
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_poll_group_destroy(group) == 0);
+
+ TAILQ_REMOVE(&g_spdk_nvme_transports, &t1, link);
+ TAILQ_REMOVE(&g_spdk_nvme_transports, &t2, link);
+ TAILQ_REMOVE(&g_spdk_nvme_transports, &t3, link);
+}
+
+static void
+test_spdk_nvme_poll_group_destroy(void)
+{
+ struct spdk_nvme_poll_group *group;
+ struct spdk_nvme_transport_poll_group *tgroup, *tgroup_1, *tgroup_2;
+ struct spdk_nvme_qpair qpair1_1 = {0};
+ int num_tgroups = 0;
+
+ /* Simple destruction of empty poll group. */
+ group = spdk_nvme_poll_group_create(NULL);
+ SPDK_CU_ASSERT_FATAL(group != NULL);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_poll_group_destroy(group) == 0);
+
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, &t1, link);
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, &t2, link);
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_transports, &t3, link);
+ group = spdk_nvme_poll_group_create(NULL);
+ SPDK_CU_ASSERT_FATAL(group != NULL);
+
+ qpair1_1.transport = &t1;
+ CU_ASSERT(spdk_nvme_poll_group_add(group, &qpair1_1) == 0);
+
+ /* Don't remove busy poll groups. */
+ g_destroy_return_value = -EBUSY;
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_poll_group_destroy(group) == -EBUSY);
+ STAILQ_FOREACH(tgroup, &group->tgroups, link) {
+ num_tgroups++;
+ }
+ CU_ASSERT(num_tgroups == 1);
+
+ /* destroy poll group with internal poll groups. */
+ g_destroy_return_value = 0;
+ tgroup_1 = STAILQ_FIRST(&group->tgroups);
+ tgroup_2 = STAILQ_NEXT(tgroup_1, link);
+ CU_ASSERT(tgroup_2 == NULL)
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_poll_group_destroy(group) == 0);
+ free(tgroup_1);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("nvme_ns_cmd", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "nvme_poll_group_create_test", test_spdk_nvme_poll_group_create) == NULL ||
+ CU_add_test(suite, "nvme_poll_group_add_remove_test",
+ test_spdk_nvme_poll_group_add_remove) == NULL ||
+ CU_add_test(suite, "nvme_poll_group_process_completions",
+ test_spdk_nvme_poll_group_process_completions) == NULL ||
+ CU_add_test(suite, "nvme_poll_group_destroy_test", test_spdk_nvme_poll_group_destroy) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_qpair.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_qpair.c/.gitignore
new file mode 100644
index 000000000..1bb18e997
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_qpair.c/.gitignore
@@ -0,0 +1 @@
+nvme_qpair_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_qpair.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_qpair.c/Makefile
new file mode 100644
index 000000000..d7762a384
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_qpair.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_qpair_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_qpair.c/nvme_qpair_ut.c b/src/spdk/test/unit/lib/nvme/nvme_qpair.c/nvme_qpair_ut.c
new file mode 100644
index 000000000..e34c70413
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_qpair.c/nvme_qpair_ut.c
@@ -0,0 +1,625 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "common/lib/test_env.c"
+
+pid_t g_spdk_nvme_pid;
+
+bool trace_flag = false;
+#define SPDK_LOG_NVME trace_flag
+
+#include "nvme/nvme_qpair.c"
+
+struct nvme_driver _g_nvme_driver = {
+ .lock = PTHREAD_MUTEX_INITIALIZER,
+};
+
+DEFINE_STUB_V(nvme_transport_qpair_abort_reqs, (struct spdk_nvme_qpair *qpair, uint32_t dnr));
+DEFINE_STUB(nvme_transport_qpair_submit_request, int,
+ (struct spdk_nvme_qpair *qpair, struct nvme_request *req), 0);
+DEFINE_STUB(spdk_nvme_ctrlr_free_io_qpair, int, (struct spdk_nvme_qpair *qpair), 0);
+DEFINE_STUB_V(nvme_transport_ctrlr_disconnect_qpair, (struct spdk_nvme_ctrlr *ctrlr,
+ struct spdk_nvme_qpair *qpair));
+DEFINE_STUB_V(nvme_ctrlr_disconnect_qpair, (struct spdk_nvme_qpair *qpair));
+
+void
+nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove)
+{
+ if (hot_remove) {
+ ctrlr->is_removed = true;
+ }
+ ctrlr->is_failed = true;
+}
+
+static bool g_called_transport_process_completions = false;
+static int32_t g_transport_process_completions_rc = 0;
+int32_t
+nvme_transport_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
+{
+ g_called_transport_process_completions = true;
+ return g_transport_process_completions_rc;
+}
+
+static void
+prepare_submit_request_test(struct spdk_nvme_qpair *qpair,
+ struct spdk_nvme_ctrlr *ctrlr)
+{
+ memset(ctrlr, 0, sizeof(*ctrlr));
+ ctrlr->free_io_qids = NULL;
+ TAILQ_INIT(&ctrlr->active_io_qpairs);
+ TAILQ_INIT(&ctrlr->active_procs);
+ MOCK_CLEAR(spdk_zmalloc);
+ nvme_qpair_init(qpair, 1, ctrlr, 0, 32);
+}
+
+static void
+cleanup_submit_request_test(struct spdk_nvme_qpair *qpair)
+{
+ free(qpair->req_buf);
+}
+
+static void
+expected_success_callback(void *arg, const struct spdk_nvme_cpl *cpl)
+{
+ CU_ASSERT(!spdk_nvme_cpl_is_error(cpl));
+}
+
+static void
+expected_failure_callback(void *arg, const struct spdk_nvme_cpl *cpl)
+{
+ CU_ASSERT(spdk_nvme_cpl_is_error(cpl));
+}
+
+static void
+test3(void)
+{
+ struct spdk_nvme_qpair qpair = {};
+ struct nvme_request *req;
+ struct spdk_nvme_ctrlr ctrlr = {};
+
+ prepare_submit_request_test(&qpair, &ctrlr);
+
+ req = nvme_allocate_request_null(&qpair, expected_success_callback, NULL);
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+
+ CU_ASSERT(nvme_qpair_submit_request(&qpair, req) == 0);
+
+ nvme_free_request(req);
+
+ cleanup_submit_request_test(&qpair);
+}
+
+static void
+test_ctrlr_failed(void)
+{
+ struct spdk_nvme_qpair qpair = {};
+ struct nvme_request *req;
+ struct spdk_nvme_ctrlr ctrlr = {};
+ char payload[4096];
+
+ prepare_submit_request_test(&qpair, &ctrlr);
+
+ req = nvme_allocate_request_contig(&qpair, payload, sizeof(payload), expected_failure_callback,
+ NULL);
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+
+ /* Set the controller to failed.
+ * Set the controller to resetting so that the qpair won't get re-enabled.
+ */
+ ctrlr.is_failed = true;
+ ctrlr.is_resetting = true;
+
+ CU_ASSERT(nvme_qpair_submit_request(&qpair, req) != 0);
+
+ cleanup_submit_request_test(&qpair);
+}
+
+static void struct_packing(void)
+{
+ /* ctrlr is the first field in nvme_qpair after the fields
+ * that are used in the I/O path. Make sure the I/O path fields
+ * all fit into two cache lines.
+ */
+ CU_ASSERT(offsetof(struct spdk_nvme_qpair, ctrlr) <= 128);
+}
+
+static int g_num_cb_failed = 0;
+static int g_num_cb_passed = 0;
+
+static void
+dummy_cb_fn(void *cb_arg, const struct spdk_nvme_cpl *cpl)
+{
+ if (cpl->status.sc == SPDK_NVME_SC_SUCCESS) {
+ g_num_cb_passed++;
+ } else {
+ g_num_cb_failed++;
+ }
+}
+
+static void test_nvme_qpair_process_completions(void)
+{
+ struct spdk_nvme_qpair admin_qp = {0};
+ struct spdk_nvme_qpair qpair = {0};
+ struct spdk_nvme_ctrlr ctrlr = {0};
+ struct nvme_request dummy_1 = {{0}};
+ struct nvme_request dummy_2 = {{0}};
+ int rc;
+
+ dummy_1.cb_fn = dummy_cb_fn;
+ dummy_2.cb_fn = dummy_cb_fn;
+ dummy_1.qpair = &qpair;
+ dummy_2.qpair = &qpair;
+
+ TAILQ_INIT(&ctrlr.active_io_qpairs);
+ TAILQ_INIT(&ctrlr.active_procs);
+ nvme_qpair_init(&qpair, 1, &ctrlr, 0, 32);
+ nvme_qpair_init(&admin_qp, 0, &ctrlr, 0, 32);
+
+ ctrlr.adminq = &admin_qp;
+
+ STAILQ_INIT(&qpair.queued_req);
+ STAILQ_INSERT_TAIL(&qpair.queued_req, &dummy_1, stailq);
+ STAILQ_INSERT_TAIL(&qpair.queued_req, &dummy_2, stailq);
+
+ /* If the controller is failed, return -ENXIO */
+ ctrlr.is_failed = true;
+ ctrlr.is_removed = false;
+ rc = spdk_nvme_qpair_process_completions(&qpair, 0);
+ CU_ASSERT(rc == -ENXIO);
+ CU_ASSERT(!STAILQ_EMPTY(&qpair.queued_req));
+ CU_ASSERT(g_num_cb_passed == 0);
+ CU_ASSERT(g_num_cb_failed == 0);
+
+ /* Same if the qpair is failed at the transport layer. */
+ ctrlr.is_failed = false;
+ ctrlr.is_removed = false;
+ qpair.state = NVME_QPAIR_DISCONNECTED;
+ rc = spdk_nvme_qpair_process_completions(&qpair, 0);
+ CU_ASSERT(rc == -ENXIO);
+ CU_ASSERT(!STAILQ_EMPTY(&qpair.queued_req));
+ CU_ASSERT(g_num_cb_passed == 0);
+ CU_ASSERT(g_num_cb_failed == 0);
+
+ /* If the controller is removed, make sure we abort the requests. */
+ ctrlr.is_failed = true;
+ ctrlr.is_removed = true;
+ qpair.state = NVME_QPAIR_CONNECTED;
+ rc = spdk_nvme_qpair_process_completions(&qpair, 0);
+ CU_ASSERT(rc == -ENXIO);
+ CU_ASSERT(STAILQ_EMPTY(&qpair.queued_req));
+ CU_ASSERT(g_num_cb_passed == 0);
+ CU_ASSERT(g_num_cb_failed == 2);
+
+ /* If we are resetting, make sure that we don't call into the transport. */
+ STAILQ_INSERT_TAIL(&qpair.queued_req, &dummy_1, stailq);
+ dummy_1.queued = true;
+ STAILQ_INSERT_TAIL(&qpair.queued_req, &dummy_2, stailq);
+ dummy_2.queued = true;
+ g_num_cb_failed = 0;
+ ctrlr.is_failed = false;
+ ctrlr.is_removed = false;
+ ctrlr.is_resetting = true;
+ rc = spdk_nvme_qpair_process_completions(&qpair, 0);
+ CU_ASSERT(rc == -ENXIO);
+ CU_ASSERT(g_called_transport_process_completions == false);
+ /* We also need to make sure we didn't abort the requests. */
+ CU_ASSERT(!STAILQ_EMPTY(&qpair.queued_req));
+ CU_ASSERT(g_num_cb_passed == 0);
+ CU_ASSERT(g_num_cb_failed == 0);
+
+ /* The case where we aren't resetting, but are enabling the qpair is the same as above. */
+ ctrlr.is_resetting = false;
+ qpair.state = NVME_QPAIR_ENABLING;
+ rc = spdk_nvme_qpair_process_completions(&qpair, 0);
+ CU_ASSERT(rc == -ENXIO);
+ CU_ASSERT(g_called_transport_process_completions == false);
+ CU_ASSERT(!STAILQ_EMPTY(&qpair.queued_req));
+ CU_ASSERT(g_num_cb_passed == 0);
+ CU_ASSERT(g_num_cb_failed == 0);
+
+ /* For other qpair states, we want to enable the qpair. */
+ qpair.state = NVME_QPAIR_CONNECTED;
+ rc = spdk_nvme_qpair_process_completions(&qpair, 1);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_called_transport_process_completions == true);
+ /* These should have been submitted to the lower layer. */
+ CU_ASSERT(STAILQ_EMPTY(&qpair.queued_req));
+ CU_ASSERT(g_num_cb_passed == 0);
+ CU_ASSERT(g_num_cb_failed == 0);
+ CU_ASSERT(nvme_qpair_get_state(&qpair) == NVME_QPAIR_ENABLED);
+
+ g_called_transport_process_completions = false;
+ g_transport_process_completions_rc = -ENXIO;
+
+ /* Fail the controller if we get an error from the transport on admin qpair. */
+ admin_qp.state = NVME_QPAIR_ENABLED;
+ rc = spdk_nvme_qpair_process_completions(&admin_qp, 0);
+ CU_ASSERT(rc == -ENXIO);
+ CU_ASSERT(g_called_transport_process_completions == true);
+ CU_ASSERT(ctrlr.is_failed == true);
+
+ /* Don't fail the controller for regular qpairs. */
+ ctrlr.is_failed = false;
+ g_called_transport_process_completions = false;
+ rc = spdk_nvme_qpair_process_completions(&qpair, 0);
+ CU_ASSERT(rc == -ENXIO);
+ CU_ASSERT(g_called_transport_process_completions == true);
+ CU_ASSERT(ctrlr.is_failed == false);
+
+ /* Make sure we don't modify the return value from the transport. */
+ ctrlr.is_failed = false;
+ g_called_transport_process_completions = false;
+ g_transport_process_completions_rc = 23;
+ rc = spdk_nvme_qpair_process_completions(&qpair, 0);
+ CU_ASSERT(rc == 23);
+ CU_ASSERT(g_called_transport_process_completions == true);
+ CU_ASSERT(ctrlr.is_failed == false);
+
+ free(qpair.req_buf);
+ free(admin_qp.req_buf);
+}
+
+static void test_nvme_completion_is_retry(void)
+{
+ struct spdk_nvme_cpl cpl = {};
+
+ cpl.status.sct = SPDK_NVME_SCT_GENERIC;
+ cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
+ cpl.status.dnr = 0;
+ CU_ASSERT_TRUE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_FORMAT_IN_PROGRESS;
+ cpl.status.dnr = 1;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+ cpl.status.dnr = 0;
+ CU_ASSERT_TRUE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_INVALID_FIELD;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_COMMAND_ID_CONFLICT;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_DATA_TRANSFER_ERROR;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_ABORTED_POWER_LOSS;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_ABORTED_FAILED_FUSED;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_ABORTED_MISSING_FUSED;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_INVALID_SGL_SEG_DESCRIPTOR;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_INVALID_NUM_SGL_DESCIRPTORS;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_METADATA_SGL_LENGTH_INVALID;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_SGL_DESCRIPTOR_TYPE_INVALID;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_INVALID_CONTROLLER_MEM_BUF;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_INVALID_PRP_OFFSET;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_ATOMIC_WRITE_UNIT_EXCEEDED;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_CAPACITY_EXCEEDED;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_RESERVATION_CONFLICT;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = 0x70;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sct = SPDK_NVME_SCT_MEDIA_ERROR;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sct = SPDK_NVME_SCT_PATH;
+ cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
+ cpl.status.dnr = 0;
+ CU_ASSERT_TRUE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sct = SPDK_NVME_SCT_PATH;
+ cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
+ cpl.status.dnr = 1;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sct = 0x4;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+}
+
+#ifdef DEBUG
+static void
+test_get_status_string(void)
+{
+ const char *status_string;
+ struct spdk_nvme_status status;
+
+ status.sct = SPDK_NVME_SCT_GENERIC;
+ status.sc = SPDK_NVME_SC_SUCCESS;
+ status_string = spdk_nvme_cpl_get_status_string(&status);
+ CU_ASSERT(strcmp(status_string, "SUCCESS") == 0);
+
+ status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
+ status.sc = SPDK_NVME_SC_COMPLETION_QUEUE_INVALID;
+ status_string = spdk_nvme_cpl_get_status_string(&status);
+ CU_ASSERT(strcmp(status_string, "INVALID COMPLETION QUEUE") == 0);
+
+ status.sct = SPDK_NVME_SCT_MEDIA_ERROR;
+ status.sc = SPDK_NVME_SC_UNRECOVERED_READ_ERROR;
+ status_string = spdk_nvme_cpl_get_status_string(&status);
+ CU_ASSERT(strcmp(status_string, "UNRECOVERED READ ERROR") == 0);
+
+ status.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
+ status.sc = 0;
+ status_string = spdk_nvme_cpl_get_status_string(&status);
+ CU_ASSERT(strcmp(status_string, "VENDOR SPECIFIC") == 0);
+
+ status.sct = 0x4;
+ status.sc = 0;
+ status_string = spdk_nvme_cpl_get_status_string(&status);
+ CU_ASSERT(strcmp(status_string, "RESERVED") == 0);
+}
+#endif
+
+static void
+test_nvme_qpair_add_cmd_error_injection(void)
+{
+ struct spdk_nvme_qpair qpair = {};
+ struct spdk_nvme_ctrlr ctrlr = {};
+ int rc;
+
+ prepare_submit_request_test(&qpair, &ctrlr);
+ ctrlr.adminq = &qpair;
+
+ /* Admin error injection at submission path */
+ MOCK_CLEAR(spdk_zmalloc);
+ rc = spdk_nvme_qpair_add_cmd_error_injection(&ctrlr, NULL,
+ SPDK_NVME_OPC_GET_FEATURES, true, 5000, 1,
+ SPDK_NVME_SCT_GENERIC, SPDK_NVME_SC_INVALID_FIELD);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(!TAILQ_EMPTY(&qpair.err_cmd_head));
+
+ /* Remove cmd error injection */
+ spdk_nvme_qpair_remove_cmd_error_injection(&ctrlr, NULL, SPDK_NVME_OPC_GET_FEATURES);
+
+ CU_ASSERT(TAILQ_EMPTY(&qpair.err_cmd_head));
+
+ /* IO error injection at completion path */
+ rc = spdk_nvme_qpair_add_cmd_error_injection(&ctrlr, &qpair,
+ SPDK_NVME_OPC_READ, false, 0, 1,
+ SPDK_NVME_SCT_MEDIA_ERROR, SPDK_NVME_SC_UNRECOVERED_READ_ERROR);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(!TAILQ_EMPTY(&qpair.err_cmd_head));
+
+ /* Provide the same opc, and check whether allocate a new entry */
+ rc = spdk_nvme_qpair_add_cmd_error_injection(&ctrlr, &qpair,
+ SPDK_NVME_OPC_READ, false, 0, 1,
+ SPDK_NVME_SCT_MEDIA_ERROR, SPDK_NVME_SC_UNRECOVERED_READ_ERROR);
+
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&qpair.err_cmd_head));
+ CU_ASSERT(TAILQ_NEXT(TAILQ_FIRST(&qpair.err_cmd_head), link) == NULL);
+
+ /* Remove cmd error injection */
+ spdk_nvme_qpair_remove_cmd_error_injection(&ctrlr, &qpair, SPDK_NVME_OPC_READ);
+
+ CU_ASSERT(TAILQ_EMPTY(&qpair.err_cmd_head));
+
+ rc = spdk_nvme_qpair_add_cmd_error_injection(&ctrlr, &qpair,
+ SPDK_NVME_OPC_COMPARE, true, 0, 5,
+ SPDK_NVME_SCT_GENERIC, SPDK_NVME_SC_COMPARE_FAILURE);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(!TAILQ_EMPTY(&qpair.err_cmd_head));
+
+ /* Remove cmd error injection */
+ spdk_nvme_qpair_remove_cmd_error_injection(&ctrlr, &qpair, SPDK_NVME_OPC_COMPARE);
+
+ CU_ASSERT(TAILQ_EMPTY(&qpair.err_cmd_head));
+
+ cleanup_submit_request_test(&qpair);
+}
+
+static void
+test_nvme_qpair_submit_request(void)
+{
+ int rc;
+ struct spdk_nvme_qpair qpair = {};
+ struct spdk_nvme_ctrlr ctrlr = {};
+ struct nvme_request *req, *req1, *req2, *req3, *req2_1, *req2_2, *req2_3;
+
+ prepare_submit_request_test(&qpair, &ctrlr);
+
+ /*
+ * Build a request chain like the following:
+ * req
+ * |
+ * ---------------
+ * | | |
+ * req1 req2 req3
+ * |
+ * ---------------
+ * | | |
+ * req2_1 req2_2 req2_3
+ */
+ req = nvme_allocate_request_null(&qpair, NULL, NULL);
+ CU_ASSERT(req != NULL);
+ TAILQ_INIT(&req->children);
+
+ req1 = nvme_allocate_request_null(&qpair, NULL, NULL);
+ CU_ASSERT(req1 != NULL);
+ req->num_children++;
+ TAILQ_INSERT_TAIL(&req->children, req1, child_tailq);
+ req1->parent = req;
+
+ req2 = nvme_allocate_request_null(&qpair, NULL, NULL);
+ CU_ASSERT(req2 != NULL);
+ TAILQ_INIT(&req2->children);
+ req->num_children++;
+ TAILQ_INSERT_TAIL(&req->children, req2, child_tailq);
+ req2->parent = req;
+
+ req3 = nvme_allocate_request_null(&qpair, NULL, NULL);
+ CU_ASSERT(req3 != NULL);
+ req->num_children++;
+ TAILQ_INSERT_TAIL(&req->children, req3, child_tailq);
+ req3->parent = req;
+
+ req2_1 = nvme_allocate_request_null(&qpair, NULL, NULL);
+ CU_ASSERT(req2_1 != NULL);
+ req2->num_children++;
+ TAILQ_INSERT_TAIL(&req2->children, req2_1, child_tailq);
+ req2_1->parent = req2;
+
+ req2_2 = nvme_allocate_request_null(&qpair, NULL, NULL);
+ CU_ASSERT(req2_2 != NULL);
+ req2->num_children++;
+ TAILQ_INSERT_TAIL(&req2->children, req2_2, child_tailq);
+ req2_2->parent = req2;
+
+ req2_3 = nvme_allocate_request_null(&qpair, NULL, NULL);
+ CU_ASSERT(req2_3 != NULL);
+ req2->num_children++;
+ TAILQ_INSERT_TAIL(&req2->children, req2_3, child_tailq);
+ req2_3->parent = req2;
+
+ ctrlr.is_failed = true;
+ rc = nvme_qpair_submit_request(&qpair, req);
+ SPDK_CU_ASSERT_FATAL(rc == -ENXIO);
+
+ cleanup_submit_request_test(&qpair);
+}
+
+static void
+test_nvme_qpair_resubmit_request_with_transport_failed(void)
+{
+ int rc;
+ struct spdk_nvme_qpair qpair = {};
+ struct spdk_nvme_ctrlr ctrlr = {};
+ struct nvme_request *req;
+
+ prepare_submit_request_test(&qpair, &ctrlr);
+
+ req = nvme_allocate_request_null(&qpair, dummy_cb_fn, NULL);
+ CU_ASSERT(req != NULL);
+ TAILQ_INIT(&req->children);
+
+ STAILQ_INSERT_TAIL(&qpair.queued_req, req, stailq);
+ req->queued = true;
+
+ g_transport_process_completions_rc = 1;
+ qpair.state = NVME_QPAIR_ENABLED;
+ g_num_cb_failed = 0;
+ MOCK_SET(nvme_transport_qpair_submit_request, -EINVAL);
+ rc = spdk_nvme_qpair_process_completions(&qpair, g_transport_process_completions_rc);
+ MOCK_CLEAR(nvme_transport_qpair_submit_request);
+ CU_ASSERT(rc == g_transport_process_completions_rc);
+ CU_ASSERT(STAILQ_EMPTY(&qpair.queued_req));
+ CU_ASSERT(g_num_cb_failed == 1);
+
+ cleanup_submit_request_test(&qpair);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvme_qpair", NULL, NULL);
+
+ CU_ADD_TEST(suite, test3);
+ CU_ADD_TEST(suite, test_ctrlr_failed);
+ CU_ADD_TEST(suite, struct_packing);
+ CU_ADD_TEST(suite, test_nvme_qpair_process_completions);
+ CU_ADD_TEST(suite, test_nvme_completion_is_retry);
+#ifdef DEBUG
+ CU_ADD_TEST(suite, test_get_status_string);
+#endif
+ CU_ADD_TEST(suite, test_nvme_qpair_add_cmd_error_injection);
+ CU_ADD_TEST(suite, test_nvme_qpair_submit_request);
+ CU_ADD_TEST(suite, test_nvme_qpair_resubmit_request_with_transport_failed);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_quirks.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_quirks.c/.gitignore
new file mode 100644
index 000000000..eca86651b
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_quirks.c/.gitignore
@@ -0,0 +1 @@
+nvme_quirks_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_quirks.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_quirks.c/Makefile
new file mode 100644
index 000000000..d86887f0e
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_quirks.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_quirks_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_quirks.c/nvme_quirks_ut.c b/src/spdk/test/unit/lib/nvme/nvme_quirks.c/nvme_quirks_ut.c
new file mode 100644
index 000000000..c3e799251
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_quirks.c/nvme_quirks_ut.c
@@ -0,0 +1,92 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "nvme/nvme_quirks.c"
+
+SPDK_LOG_REGISTER_COMPONENT("nvme", SPDK_LOG_NVME)
+
+static void
+test_nvme_quirks_striping(void)
+{
+ struct spdk_pci_id pci_id = {};
+ uint64_t quirks = 0;
+
+ /* Non-Intel device should not have striping enabled */
+ quirks = nvme_get_quirks(&pci_id);
+ CU_ASSERT((quirks & NVME_INTEL_QUIRK_STRIPING) == 0);
+
+ /* Set the vendor id to Intel, but no device id. No striping. */
+ pci_id.class_id = SPDK_PCI_CLASS_NVME;
+ pci_id.vendor_id = SPDK_PCI_VID_INTEL;
+ quirks = nvme_get_quirks(&pci_id);
+ CU_ASSERT((quirks & NVME_INTEL_QUIRK_STRIPING) == 0);
+
+ /* Device ID 0x0953 should have striping enabled */
+ pci_id.device_id = 0x0953;
+ quirks = nvme_get_quirks(&pci_id);
+ CU_ASSERT((quirks & NVME_INTEL_QUIRK_STRIPING) != 0);
+
+ /* Even if specific subvendor/subdevice ids are set,
+ * striping should be enabled.
+ */
+ pci_id.subvendor_id = SPDK_PCI_VID_INTEL;
+ pci_id.subdevice_id = 0x3704;
+ quirks = nvme_get_quirks(&pci_id);
+ CU_ASSERT((quirks & NVME_INTEL_QUIRK_STRIPING) != 0);
+
+ pci_id.subvendor_id = 1234;
+ pci_id.subdevice_id = 42;
+ quirks = nvme_get_quirks(&pci_id);
+ CU_ASSERT((quirks & NVME_INTEL_QUIRK_STRIPING) != 0);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvme_quirks", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_nvme_quirks_striping);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_rdma.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_rdma.c/.gitignore
new file mode 100644
index 000000000..66265b955
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_rdma.c/.gitignore
@@ -0,0 +1 @@
+nvme_rdma_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_rdma.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_rdma.c/Makefile
new file mode 100644
index 000000000..7ea42632b
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_rdma.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_rdma_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_rdma.c/nvme_rdma_ut.c b/src/spdk/test/unit/lib/nvme/nvme_rdma.c/nvme_rdma_ut.c
new file mode 100644
index 000000000..8342e84d3
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_rdma.c/nvme_rdma_ut.c
@@ -0,0 +1,406 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+#include "spdk_cunit.h"
+#include "nvme/nvme_rdma.c"
+#include "common/lib/nvme/common_stubs.h"
+#include "common/lib/test_rdma.c"
+
+SPDK_LOG_REGISTER_COMPONENT("nvme", SPDK_LOG_NVME);
+
+/* The request-builder tests below never perform real memory registration or
+ * poll-group processing, so those entry points are stubbed out.  Trailing
+ * semicolons added on the first and last statements for consistency with the
+ * other stubs here and with nvme_tcp_ut.c.
+ */
+DEFINE_STUB(spdk_mem_map_set_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
+		uint64_t size, uint64_t translation), 0);
+DEFINE_STUB(spdk_mem_map_clear_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
+		uint64_t size), 0);
+
+DEFINE_STUB(spdk_mem_map_alloc, struct spdk_mem_map *, (uint64_t default_translation,
+		const struct spdk_mem_map_ops *ops, void *cb_ctx), NULL);
+DEFINE_STUB_V(spdk_mem_map_free, (struct spdk_mem_map **pmap));
+
+DEFINE_STUB(nvme_poll_group_connect_qpair, int, (struct spdk_nvme_qpair *qpair), 0);
+
+DEFINE_STUB_V(nvme_qpair_resubmit_requests, (struct spdk_nvme_qpair *qpair, uint32_t num_requests));
+DEFINE_STUB(spdk_nvme_poll_group_process_completions, int64_t, (struct spdk_nvme_poll_group *group,
+		uint32_t completions_per_qpair, spdk_nvme_disconnected_qpair_cb disconnected_qpair_cb), 0);
+
+/* Globals backing the spdk_mem_map_translate mock below: every translation
+ * resolves to g_nvme_rdma_mr, and a non-zero g_mr_size caps the reported
+ * region size so tests can force an SGL element to straddle an MR boundary.
+ */
+uint64_t g_mr_size;
+struct ibv_mr g_nvme_rdma_mr;
+
+uint64_t
+spdk_mem_map_translate(const struct spdk_mem_map *map, uint64_t vaddr, uint64_t *size)
+{
+	if (g_mr_size != 0) {
+		/* Fix: assign through the real uint64_t pointer.  The original
+		 * *(uint32_t *)size cast overwrote only 4 of the 8 bytes of
+		 * *size (the low half on little-endian, the wrong half on
+		 * big-endian), leaving the caller's size partially stale.
+		 */
+		*size = g_mr_size;
+	}
+
+	return (uint64_t)&g_nvme_rdma_mr;
+}
+
+/* Minimal bdev-style I/O context feeding the reset_sgl/next_sge callbacks:
+ * iovs[] is the scatter list for one I/O, iovpos the cursor into it.
+ */
+struct nvme_rdma_ut_bdev_io {
+	struct iovec iovs[NVME_RDMA_MAX_SGL_DESCRIPTORS];
+	int iovpos;
+};
+
+/* Test double for bdev_nvme_reset_sgl: position the iov cursor at the entry
+ * that begins exactly `offset` bytes into the payload (offsets are only ever
+ * supplied at iov boundaries).
+ */
+static void nvme_rdma_ut_reset_sgl(void *cb_arg, uint32_t offset)
+{
+	struct nvme_rdma_ut_bdev_io *bio = cb_arg;
+	int pos = 0;
+
+	/* Consume whole entries until the remaining offset reaches zero. */
+	while (pos < NVME_RDMA_MAX_SGL_DESCRIPTORS && offset != 0) {
+		offset -= bio->iovs[pos].iov_len;
+		pos++;
+	}
+
+	bio->iovpos = pos;
+	SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_RDMA_MAX_SGL_DESCRIPTORS);
+}
+
+/* Test double for bdev_nvme_next_sge: hand out the current iov entry and
+ * advance the cursor.  Always reports success.
+ */
+static int nvme_rdma_ut_next_sge(void *cb_arg, void **address, uint32_t *length)
+{
+	struct nvme_rdma_ut_bdev_io *bio = cb_arg;
+	struct iovec *cur;
+
+	SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_RDMA_MAX_SGL_DESCRIPTORS);
+
+	cur = &bio->iovs[bio->iovpos++];
+	*address = cur->iov_base;
+	*length = cur->iov_len;
+
+	return 0;
+}
+
+/* Exercise nvme_rdma_build_sgl_request() for keyed SGL payloads: the
+ * single- and multi-descriptor success paths, then the failure paths
+ * (memory region smaller than an iov, SGL shorter than the payload, and a
+ * keyed-SGL length overflowing its 24-bit field).  Depends on the
+ * spdk_mem_map_translate mock and g_mr_size/g_nvme_rdma_mr globals above.
+ */
+static void
+test_nvme_rdma_build_sgl_request(void)
+{
+	struct nvme_rdma_qpair rqpair;
+	struct spdk_nvme_ctrlr ctrlr = {0};
+	struct spdk_nvmf_cmd cmd = {{0}};
+	struct spdk_nvme_rdma_req rdma_req = {0};
+	struct nvme_request req = {{0}};
+	struct nvme_rdma_ut_bdev_io bio;
+	struct spdk_nvme_rdma_mr_map rmap = {0};
+	struct spdk_mem_map *map = NULL;
+	uint64_t i;
+	int rc;
+
+	rmap.map = map;
+
+	ctrlr.max_sges = NVME_RDMA_MAX_SGL_DESCRIPTORS;
+	ctrlr.cdata.nvmf_specific.msdbd = 16;
+
+	rqpair.mr_map = &rmap;
+	rqpair.qpair.ctrlr = &ctrlr;
+	rqpair.cmds = &cmd;
+	cmd.sgl[0].address = 0x1111;
+	rdma_req.id = 0;
+	rdma_req.req = &req;
+
+	req.payload.reset_sgl_fn = nvme_rdma_ut_reset_sgl;
+	req.payload.next_sge_fn = nvme_rdma_ut_next_sge;
+	req.payload.contig_or_cb_arg = &bio;
+	req.qpair = &rqpair.qpair;
+
+	g_nvme_rdma_mr.rkey = 1;
+
+	for (i = 0; i < NVME_RDMA_MAX_SGL_DESCRIPTORS; i++) {
+		bio.iovs[i].iov_base = (void *)i;
+		bio.iovs[i].iov_len = 0;
+	}
+
+	/* Test case 1: single SGL. Expected: PASS */
+	bio.iovpos = 0;
+	req.payload_offset = 0;
+	req.payload_size = 0x1000;
+	bio.iovs[0].iov_len = 0x1000;
+	rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	CU_ASSERT(bio.iovpos == 1);
+	CU_ASSERT(req.cmd.dptr.sgl1.keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
+	CU_ASSERT(req.cmd.dptr.sgl1.keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
+	CU_ASSERT(req.cmd.dptr.sgl1.keyed.length == req.payload_size);
+	CU_ASSERT(req.cmd.dptr.sgl1.keyed.key == g_nvme_rdma_mr.rkey);
+	CU_ASSERT(req.cmd.dptr.sgl1.address == (uint64_t)bio.iovs[0].iov_base);
+	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
+
+	/* Test case 2: multiple SGL. Expected: PASS */
+	bio.iovpos = 0;
+	req.payload_offset = 0;
+	req.payload_size = 0x4000;
+	for (i = 0; i < 4; i++) {
+		bio.iovs[i].iov_len = 0x1000;
+	}
+	rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	CU_ASSERT(bio.iovpos == 4);
+	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_LAST_SEGMENT);
+	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
+	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 4 * sizeof(struct spdk_nvme_sgl_descriptor));
+	CU_ASSERT(req.cmd.dptr.sgl1.address == (uint64_t)0);
+	/* NOTE(review): the statement below lacks a trailing semicolon; it
+	 * presumably only compiles because CU_ASSERT expands to a braced
+	 * block — worth normalizing.
+	 */
+	CU_ASSERT(rdma_req.send_sgl[0].length == 4 * sizeof(struct spdk_nvme_sgl_descriptor) + sizeof(
+			  struct spdk_nvme_cmd))
+	for (i = 0; i < 4; i++) {
+		CU_ASSERT(cmd.sgl[i].keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
+		CU_ASSERT(cmd.sgl[i].keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
+		CU_ASSERT(cmd.sgl[i].keyed.length == bio.iovs[i].iov_len);
+		CU_ASSERT(cmd.sgl[i].keyed.key == g_nvme_rdma_mr.rkey);
+		CU_ASSERT(cmd.sgl[i].address == (uint64_t)bio.iovs[i].iov_base);
+	}
+
+	/* Test case 3: Multiple SGL, SGL 2X mr size. Expected: FAIL */
+	/* payload_size stays 0x4000 from case 2; the mock MR now reports only
+	 * 0x800 bytes, so a 0x1000 iov cannot fit in one region.
+	 */
+	bio.iovpos = 0;
+	req.payload_offset = 0;
+	g_mr_size = 0x800;
+	rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
+	SPDK_CU_ASSERT_FATAL(rc != 0);
+	CU_ASSERT(bio.iovpos == 1);
+
+	/* Test case 4: Multiple SGL, SGL size smaller than I/O size. Expected: FAIL */
+	bio.iovpos = 0;
+	req.payload_offset = 0;
+	req.payload_size = 0x6000;
+	g_mr_size = 0x0;
+	rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
+	SPDK_CU_ASSERT_FATAL(rc != 0);
+	CU_ASSERT(bio.iovpos == NVME_RDMA_MAX_SGL_DESCRIPTORS);
+
+	/* Test case 5: SGL length exceeds 3 bytes. Expected: FAIL */
+	req.payload_size = 0x1000 + (1 << 24);
+	bio.iovs[0].iov_len = 0x1000;
+	bio.iovs[1].iov_len = 1 << 24;
+	rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
+	SPDK_CU_ASSERT_FATAL(rc != 0);
+}
+
+/* Exercise nvme_rdma_build_sgl_inline_request(): the payload rides in the
+ * send WR (send_sgl[1], using the MR's lkey) and the command carries an
+ * unkeyed data-block SGL with in-capsule offset addressing.
+ */
+static void
+test_nvme_rdma_build_sgl_inline_request(void)
+{
+	struct nvme_rdma_qpair rqpair;
+	struct spdk_nvme_ctrlr ctrlr = {0};
+	struct spdk_nvmf_cmd cmd = {{0}};
+	struct spdk_nvme_rdma_req rdma_req = {0};
+	struct nvme_request req = {{0}};
+	struct nvme_rdma_ut_bdev_io bio;
+	struct spdk_nvme_rdma_mr_map rmap = {0};
+	struct spdk_mem_map *map = NULL;
+	int rc;
+
+	rmap.map = map;
+
+	ctrlr.max_sges = NVME_RDMA_MAX_SGL_DESCRIPTORS;
+	ctrlr.cdata.nvmf_specific.msdbd = 16;
+
+	rqpair.mr_map = &rmap;
+	rqpair.qpair.ctrlr = &ctrlr;
+	rqpair.cmds = &cmd;
+	cmd.sgl[0].address = 0x1111;
+	rdma_req.id = 0;
+	rdma_req.req = &req;
+
+	req.payload.reset_sgl_fn = nvme_rdma_ut_reset_sgl;
+	req.payload.next_sge_fn = nvme_rdma_ut_next_sge;
+	req.payload.contig_or_cb_arg = &bio;
+	req.qpair = &rqpair.qpair;
+
+	g_nvme_rdma_mr.lkey = 2;
+
+	/* Test case 1: single inline SGL. Expected: PASS */
+	bio.iovpos = 0;
+	req.payload_offset = 0;
+	req.payload_size = 0x1000;
+	bio.iovs[0].iov_base = (void *)0xdeadbeef;
+	bio.iovs[0].iov_len = 0x1000;
+	rc = nvme_rdma_build_sgl_inline_request(&rqpair, &rdma_req);
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	CU_ASSERT(bio.iovpos == 1);
+	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
+	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
+	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
+	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
+	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
+	CU_ASSERT(rdma_req.send_sgl[1].length == req.payload_size);
+	CU_ASSERT(rdma_req.send_sgl[1].addr == (uint64_t)bio.iovs[0].iov_base);
+	CU_ASSERT(rdma_req.send_sgl[1].lkey == g_nvme_rdma_mr.lkey);
+
+	/* Test case 2: SGL length exceeds 3 bytes. Expected: PASS */
+	/* Unlike the keyed case, a 1 << 24 payload still succeeds inline. */
+	bio.iovpos = 0;
+	req.payload_offset = 0;
+	req.payload_size = 1 << 24;
+	bio.iovs[0].iov_len = 1 << 24;
+	rc = nvme_rdma_build_sgl_inline_request(&rqpair, &rdma_req);
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	CU_ASSERT(bio.iovpos == 1);
+	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
+	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
+	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
+	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
+	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
+	CU_ASSERT(rdma_req.send_sgl[1].length == req.payload_size);
+	CU_ASSERT(rdma_req.send_sgl[1].addr == (uint64_t)bio.iovs[0].iov_base);
+	CU_ASSERT(rdma_req.send_sgl[1].lkey == g_nvme_rdma_mr.lkey);
+}
+
+/* Exercise nvme_rdma_build_contig_request(): a flat payload buffer becomes a
+ * single keyed SGL data block; payloads of 1 << 24 or more must be rejected
+ * (24-bit keyed length field).
+ */
+static void
+test_nvme_rdma_build_contig_request(void)
+{
+	struct nvme_rdma_qpair rqpair;
+	struct spdk_nvme_ctrlr ctrlr = {0};
+	struct spdk_nvmf_cmd cmd = {{0}};
+	struct spdk_nvme_rdma_req rdma_req = {0};
+	struct nvme_request req = {{0}};
+	struct spdk_nvme_rdma_mr_map rmap = {0};
+	struct spdk_mem_map *map = NULL;
+	int rc;
+
+	rmap.map = map;
+
+	ctrlr.max_sges = NVME_RDMA_MAX_SGL_DESCRIPTORS;
+	ctrlr.cdata.nvmf_specific.msdbd = 16;
+
+	rqpair.mr_map = &rmap;
+	rqpair.qpair.ctrlr = &ctrlr;
+	rqpair.cmds = &cmd;
+	cmd.sgl[0].address = 0x1111;
+	rdma_req.id = 0;
+	rdma_req.req = &req;
+
+	req.payload.contig_or_cb_arg = (void *)0xdeadbeef;
+	req.qpair = &rqpair.qpair;
+
+	g_nvme_rdma_mr.rkey = 2;
+
+	/* Test case 1: contig request. Expected: PASS */
+	req.payload_offset = 0;
+	req.payload_size = 0x1000;
+	rc = nvme_rdma_build_contig_request(&rqpair, &rdma_req);
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	CU_ASSERT(req.cmd.dptr.sgl1.keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
+	CU_ASSERT(req.cmd.dptr.sgl1.keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
+	CU_ASSERT(req.cmd.dptr.sgl1.keyed.length == req.payload_size);
+	CU_ASSERT(req.cmd.dptr.sgl1.keyed.key == g_nvme_rdma_mr.rkey);
+	CU_ASSERT(req.cmd.dptr.sgl1.address == (uint64_t)req.payload.contig_or_cb_arg);
+	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
+
+	/* Test case 2: SGL length exceeds 3 bytes. Expected: FAIL */
+	req.payload_offset = 0;
+	req.payload_size = 1 << 24;
+	rc = nvme_rdma_build_contig_request(&rqpair, &rdma_req);
+	SPDK_CU_ASSERT_FATAL(rc != 0);
+}
+
+/* Exercise nvme_rdma_build_contig_inline_request(): a flat payload buffer is
+ * carried inline in the send WR; even 1 << 24 bytes succeeds (no keyed
+ * 24-bit length restriction).
+ */
+static void
+test_nvme_rdma_build_contig_inline_request(void)
+{
+	struct nvme_rdma_qpair rqpair;
+	struct spdk_nvme_ctrlr ctrlr = {0};
+	struct spdk_nvmf_cmd cmd = {{0}};
+	struct spdk_nvme_rdma_req rdma_req = {0};
+	struct nvme_request req = {{0}};
+	struct spdk_nvme_rdma_mr_map rmap = {0};
+	struct spdk_mem_map *map = NULL;
+	int rc;
+
+	rmap.map = map;
+
+	ctrlr.max_sges = NVME_RDMA_MAX_SGL_DESCRIPTORS;
+	ctrlr.cdata.nvmf_specific.msdbd = 16;
+
+	rqpair.mr_map = &rmap;
+	rqpair.qpair.ctrlr = &ctrlr;
+	rqpair.cmds = &cmd;
+	cmd.sgl[0].address = 0x1111;
+	rdma_req.id = 0;
+	rdma_req.req = &req;
+
+	req.payload.contig_or_cb_arg = (void *)0xdeadbeef;
+	req.qpair = &rqpair.qpair;
+
+	/* NOTE(review): this test sets rkey, yet the send_sgl checks below
+	 * compare lkey against the same global's lkey field (left over from
+	 * the sgl_inline test).  Self-consistent either way, but lkey was
+	 * presumably intended here — confirm.
+	 */
+	g_nvme_rdma_mr.rkey = 2;
+
+	/* Test case 1: single inline SGL. Expected: PASS */
+	req.payload_offset = 0;
+	req.payload_size = 0x1000;
+	rc = nvme_rdma_build_contig_inline_request(&rqpair, &rdma_req);
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
+	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
+	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
+	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
+	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
+	CU_ASSERT(rdma_req.send_sgl[1].length == req.payload_size);
+	CU_ASSERT(rdma_req.send_sgl[1].addr == (uint64_t)req.payload.contig_or_cb_arg);
+	CU_ASSERT(rdma_req.send_sgl[1].lkey == g_nvme_rdma_mr.lkey);
+
+	/* Test case 2: SGL length exceeds 3 bytes. Expected: PASS */
+	req.payload_offset = 0;
+	req.payload_size = 1 << 24;
+	rc = nvme_rdma_build_contig_inline_request(&rqpair, &rdma_req);
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
+	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
+	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == req.payload_size);
+	CU_ASSERT(req.cmd.dptr.sgl1.address == 0);
+	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
+	CU_ASSERT(rdma_req.send_sgl[1].length == req.payload_size);
+	CU_ASSERT(rdma_req.send_sgl[1].addr == (uint64_t)req.payload.contig_or_cb_arg);
+	CU_ASSERT(rdma_req.send_sgl[1].lkey == g_nvme_rdma_mr.lkey);
+}
+
+/* Register the four request-builder tests and run them; the process exit
+ * status is CUnit's failure count so the harness can detect regressions.
+ */
+int main(int argc, char **argv)
+{
+	CU_pSuite	suite = NULL;
+	unsigned int	num_failures;
+
+	CU_set_error_action(CUEA_ABORT);
+	CU_initialize_registry();
+
+	suite = CU_add_suite("nvme_rdma", NULL, NULL);
+	CU_ADD_TEST(suite, test_nvme_rdma_build_sgl_request);
+	CU_ADD_TEST(suite, test_nvme_rdma_build_sgl_inline_request);
+	CU_ADD_TEST(suite, test_nvme_rdma_build_contig_request);
+	CU_ADD_TEST(suite, test_nvme_rdma_build_contig_inline_request);
+
+	CU_basic_set_mode(CU_BRM_VERBOSE);
+	CU_basic_run_tests();
+	num_failures = CU_get_number_of_failures();
+	CU_cleanup_registry();
+	return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_tcp.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_tcp.c/.gitignore
new file mode 100644
index 000000000..c0cf6e92c
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_tcp.c/.gitignore
@@ -0,0 +1 @@
+nvme_tcp_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_tcp.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_tcp.c/Makefile
new file mode 100644
index 000000000..612f2b793
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_tcp.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+# Single-source CUnit test; build and link rules come from spdk.unittest.mk.
+TEST_FILE = nvme_tcp_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_tcp.c/nvme_tcp_ut.c b/src/spdk/test/unit/lib/nvme/nvme_tcp.c/nvme_tcp_ut.c
new file mode 100644
index 000000000..ed817fe2d
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_tcp.c/nvme_tcp_ut.c
@@ -0,0 +1,459 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "common/lib/test_sock.c"
+
+#include "nvme/nvme_tcp.c"
+#include "common/lib/nvme/common_stubs.h"
+
+SPDK_LOG_REGISTER_COMPONENT("nvme", SPDK_LOG_NVME);
+
+/* These tests drive the PDU/iov helpers directly, so qpair submission,
+ * socket priority, and poll-group removal can be no-op stubs.
+ */
+DEFINE_STUB(nvme_qpair_submit_request,
+	    int, (struct spdk_nvme_qpair *qpair, struct nvme_request *req), 0);
+
+DEFINE_STUB(spdk_sock_set_priority,
+	    int, (struct spdk_sock *sock, int priority), 0);
+
+DEFINE_STUB(spdk_nvme_poll_group_remove, int, (struct spdk_nvme_poll_group *group,
+		struct spdk_nvme_qpair *qpair), 0);
+
+/* Verify nvme_tcp_pdu_set_data_buf() maps an (offset, length) window over a
+ * source SGL onto pdu.data_iov[] correctly.  pdu.dif_ctx stays NULL here, so
+ * no metadata accounting is involved.
+ */
+static void
+test_nvme_tcp_pdu_set_data_buf(void)
+{
+	struct nvme_tcp_pdu pdu = {};
+	struct iovec iov[NVME_TCP_MAX_SGL_DESCRIPTORS] = {};
+	uint32_t data_len;
+	uint64_t i;
+
+	/* 1st case: input is a single SGL entry. */
+	iov[0].iov_base = (void *)0xDEADBEEF;
+	iov[0].iov_len = 4096;
+
+	nvme_tcp_pdu_set_data_buf(&pdu, iov, 1, 1024, 512);
+
+	CU_ASSERT(pdu.data_iovcnt == 1);
+	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF + 1024);
+	CU_ASSERT(pdu.data_iov[0].iov_len == 512);
+
+	/* 2nd case: simulate split on multiple SGL entries. */
+	iov[0].iov_base = (void *)0xDEADBEEF;
+	iov[0].iov_len = 4096;
+	iov[1].iov_base = (void *)0xFEEDBEEF;
+	iov[1].iov_len = 512 * 7;
+	iov[2].iov_base = (void *)0xF00DF00D;
+	iov[2].iov_len = 4096 * 2;
+
+	/* Window entirely inside the first entry. */
+	nvme_tcp_pdu_set_data_buf(&pdu, iov, 3, 0, 2048);
+
+	CU_ASSERT(pdu.data_iovcnt == 1);
+	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
+	CU_ASSERT(pdu.data_iov[0].iov_len == 2048);
+
+	/* Window straddling the first/second entries. */
+	nvme_tcp_pdu_set_data_buf(&pdu, iov, 3, 2048, 2048 + 512 * 3);
+
+	CU_ASSERT(pdu.data_iovcnt == 2);
+	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF + 2048);
+	CU_ASSERT(pdu.data_iov[0].iov_len == 2048);
+	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
+	CU_ASSERT(pdu.data_iov[1].iov_len == 512 * 3);
+
+	/* Window covering the tail of entry 1 plus all of entry 2. */
+	nvme_tcp_pdu_set_data_buf(&pdu, iov, 3, 4096 + 512 * 3, 512 * 4 + 4096 * 2);
+
+	CU_ASSERT(pdu.data_iovcnt == 2);
+	CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xFEEDBEEF + 512 * 3);
+	CU_ASSERT(pdu.data_iov[0].iov_len == 512 * 4);
+	CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xF00DF00D);
+	CU_ASSERT(pdu.data_iov[1].iov_len == 4096 * 2);
+
+	/* 3rd case: Number of input SGL entries is equal to the number of PDU SGL
+	 * entries.
+	 */
+	data_len = 0;
+	for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
+		iov[i].iov_base = (void *)(0xDEADBEEF + i);
+		iov[i].iov_len = 512 * (i + 1);
+		data_len += 512 * (i + 1);
+	}
+
+	nvme_tcp_pdu_set_data_buf(&pdu, iov, NVME_TCP_MAX_SGL_DESCRIPTORS, 0, data_len);
+
+	CU_ASSERT(pdu.data_iovcnt == NVME_TCP_MAX_SGL_DESCRIPTORS);
+	for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
+		CU_ASSERT((uint64_t)pdu.data_iov[i].iov_base == 0xDEADBEEF + i);
+		CU_ASSERT(pdu.data_iov[i].iov_len == 512 * (i + 1));
+	}
+}
+
+/* Verify nvme_tcp_build_iovs() lays out a capsule-cmd PDU as
+ * [header + header digest][data iovs...][data digest] and reports the total
+ * mapped length, with both digests enabled.
+ */
+static void
+test_nvme_tcp_build_iovs(void)
+{
+	const uintptr_t pdu_iov_len = 4096;
+	struct nvme_tcp_pdu pdu = {};
+	struct iovec iovs[5] = {};
+	uint32_t mapped_length = 0;
+	int rc;
+
+	pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
+	pdu.hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
+	pdu.hdr.common.plen = pdu.hdr.common.hlen + SPDK_NVME_TCP_DIGEST_LEN + pdu_iov_len * 2 +
+			      SPDK_NVME_TCP_DIGEST_LEN;
+	pdu.data_len = pdu_iov_len * 2;
+	pdu.padding_len = 0;
+
+	pdu.data_iov[0].iov_base = (void *)0xDEADBEEF;
+	pdu.data_iov[0].iov_len = pdu_iov_len;
+	pdu.data_iov[1].iov_base = (void *)(0xDEADBEEF + pdu_iov_len);
+	pdu.data_iov[1].iov_len = pdu_iov_len;
+	pdu.data_iovcnt = 2;
+
+	rc = nvme_tcp_build_iovs(iovs, 5, &pdu, true, true, &mapped_length);
+	CU_ASSERT(rc == 4);
+	CU_ASSERT(iovs[0].iov_base == (void *)&pdu.hdr.raw);
+	CU_ASSERT(iovs[0].iov_len == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN);
+	CU_ASSERT(iovs[1].iov_base == (void *)0xDEADBEEF);
+	CU_ASSERT(iovs[1].iov_len == pdu_iov_len);
+	CU_ASSERT(iovs[2].iov_base == (void *)(0xDEADBEEF + pdu_iov_len));
+	CU_ASSERT(iovs[2].iov_len == pdu_iov_len);
+	CU_ASSERT(iovs[3].iov_base == (void *)pdu.data_digest);
+	CU_ASSERT(iovs[3].iov_len == SPDK_NVME_TCP_DIGEST_LEN);
+	CU_ASSERT(mapped_length == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN +
+		  pdu_iov_len * 2 + SPDK_NVME_TCP_DIGEST_LEN);
+
+	/* Add a new data_iov entry, update pdu iov count and data length */
+	pdu.data_iov[2].iov_base = (void *)(0xBAADF00D);
+	pdu.data_iov[2].iov_len = 123;
+	pdu.data_iovcnt = 3;
+	pdu.data_len += 123;
+	pdu.hdr.common.plen += 123;
+
+	rc = nvme_tcp_build_iovs(iovs, 5, &pdu, true, true, &mapped_length);
+	CU_ASSERT(rc == 5);
+	CU_ASSERT(iovs[0].iov_base == (void *)&pdu.hdr.raw);
+	CU_ASSERT(iovs[0].iov_len == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN);
+	CU_ASSERT(iovs[1].iov_base == (void *)0xDEADBEEF);
+	CU_ASSERT(iovs[1].iov_len == pdu_iov_len);
+	CU_ASSERT(iovs[2].iov_base == (void *)(0xDEADBEEF + pdu_iov_len));
+	CU_ASSERT(iovs[2].iov_len == pdu_iov_len);
+	CU_ASSERT(iovs[3].iov_base == (void *)(0xBAADF00D));
+	CU_ASSERT(iovs[3].iov_len == 123);
+	CU_ASSERT(iovs[4].iov_base == (void *)pdu.data_digest);
+	CU_ASSERT(iovs[4].iov_len == SPDK_NVME_TCP_DIGEST_LEN);
+	CU_ASSERT(mapped_length == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN +
+		  pdu_iov_len * 2 + SPDK_NVME_TCP_DIGEST_LEN + 123);
+}
+
+/* Minimal bdev-style I/O context feeding the reset_sgl/next_sge callbacks:
+ * iovs[] is the scatter list for one I/O, iovpos the cursor into it.
+ */
+struct nvme_tcp_ut_bdev_io {
+	struct iovec iovs[NVME_TCP_MAX_SGL_DESCRIPTORS];
+	int iovpos;
+};
+
+/* Test double for bdev_nvme_reset_sgl: position the iov cursor at the entry
+ * that begins exactly `offset` bytes into the payload.
+ */
+static void
+nvme_tcp_ut_reset_sgl(void *cb_arg, uint32_t offset)
+{
+	struct nvme_tcp_ut_bdev_io *bio = cb_arg;
+	int pos = 0;
+
+	/* Consume whole entries; the offset must land on an entry boundary. */
+	while (pos < NVME_TCP_MAX_SGL_DESCRIPTORS && offset != 0) {
+		SPDK_CU_ASSERT_FATAL(offset >= bio->iovs[pos].iov_len);
+		offset -= bio->iovs[pos].iov_len;
+		pos++;
+	}
+
+	SPDK_CU_ASSERT_FATAL(offset == 0);
+	bio->iovpos = pos;
+	SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_TCP_MAX_SGL_DESCRIPTORS);
+}
+
+/* Test double for bdev_nvme_next_sge: hand out the current iov entry and
+ * advance the cursor.  Always reports success.
+ */
+static int
+nvme_tcp_ut_next_sge(void *cb_arg, void **address, uint32_t *length)
+{
+	struct nvme_tcp_ut_bdev_io *bio = cb_arg;
+	struct iovec *cur;
+
+	SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_TCP_MAX_SGL_DESCRIPTORS);
+
+	cur = &bio->iovs[bio->iovpos++];
+	*address = cur->iov_base;
+	*length = cur->iov_len;
+
+	return 0;
+}
+
+/* Exercise nvme_tcp_build_sgl_request(): copy the bdev SGL into
+ * tcp_req.iov[], failing when the SGL cannot cover the payload.
+ */
+static void
+test_nvme_tcp_build_sgl_request(void)
+{
+	struct nvme_tcp_qpair tqpair;
+	struct spdk_nvme_ctrlr ctrlr = {0};
+	struct nvme_tcp_req tcp_req = {0};
+	struct nvme_request req = {{0}};
+	struct nvme_tcp_ut_bdev_io bio;
+	uint64_t i;
+	int rc;
+
+	ctrlr.max_sges = NVME_TCP_MAX_SGL_DESCRIPTORS;
+	tqpair.qpair.ctrlr = &ctrlr;
+	tcp_req.req = &req;
+
+	req.payload.reset_sgl_fn = nvme_tcp_ut_reset_sgl;
+	req.payload.next_sge_fn = nvme_tcp_ut_next_sge;
+	req.payload.contig_or_cb_arg = &bio;
+	req.qpair = &tqpair.qpair;
+
+	for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
+		bio.iovs[i].iov_base = (void *)(0xFEEDB000 + i * 0x1000);
+		bio.iovs[i].iov_len = 0;
+	}
+
+	/* Test case 1: Single SGL. Expected: PASS */
+	bio.iovpos = 0;
+	req.payload_offset = 0;
+	req.payload_size = 0x1000;
+	bio.iovs[0].iov_len = 0x1000;
+	rc = nvme_tcp_build_sgl_request(&tqpair, &tcp_req);
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	CU_ASSERT(bio.iovpos == 1);
+	CU_ASSERT((uint64_t)tcp_req.iov[0].iov_base == (uint64_t)bio.iovs[0].iov_base);
+	CU_ASSERT(tcp_req.iov[0].iov_len == bio.iovs[0].iov_len);
+	CU_ASSERT(tcp_req.iovcnt == 1);
+
+	/* Test case 2: Multiple SGL. Expected: PASS */
+	bio.iovpos = 0;
+	req.payload_offset = 0;
+	req.payload_size = 0x4000;
+	for (i = 0; i < 4; i++) {
+		bio.iovs[i].iov_len = 0x1000;
+	}
+	rc = nvme_tcp_build_sgl_request(&tqpair, &tcp_req);
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	CU_ASSERT(bio.iovpos == 4);
+	CU_ASSERT(tcp_req.iovcnt == 4);
+	for (i = 0; i < 4; i++) {
+		CU_ASSERT(tcp_req.iov[i].iov_len == bio.iovs[i].iov_len);
+		CU_ASSERT((uint64_t)tcp_req.iov[i].iov_base == (uint64_t)bio.iovs[i].iov_base);
+	}
+
+	/* Test case 3: Payload is bigger than SGL. Expected: FAIL */
+	/* Every entry is 0x1000, so the whole SGL presumably totals less than
+	 * the 0x17000 payload — the build must fail after consuming all
+	 * entries.
+	 */
+	bio.iovpos = 0;
+	req.payload_offset = 0;
+	req.payload_size = 0x17000;
+	for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
+		bio.iovs[i].iov_len = 0x1000;
+	}
+	rc = nvme_tcp_build_sgl_request(&tqpair, &tcp_req);
+	SPDK_CU_ASSERT_FATAL(rc != 0);
+	CU_ASSERT(bio.iovpos == NVME_TCP_MAX_SGL_DESCRIPTORS);
+	for (i = 0; i < NVME_TCP_MAX_SGL_DESCRIPTORS; i++) {
+		CU_ASSERT(tcp_req.iov[i].iov_len == bio.iovs[i].iov_len);
+		CU_ASSERT((uint64_t)tcp_req.iov[i].iov_base == (uint64_t)bio.iovs[i].iov_base);
+	}
+}
+
+/* Verify nvme_tcp_pdu_set_data_buf() with a DIF context.  The context is
+ * initialized with 520-byte extended blocks and 8 bytes of metadata
+ * (presumably 512 data + 8 md per block), so logical data offsets/lengths
+ * get expanded to extended-block buffer ranges and dif_ctx.data_offset
+ * tracks the logical data stream.
+ */
+static void
+test_nvme_tcp_pdu_set_data_buf_with_md(void)
+{
+	struct nvme_tcp_pdu pdu = {};
+	struct iovec iovs[7] = {};
+	struct spdk_dif_ctx dif_ctx = {};
+	int rc;
+
+	pdu.dif_ctx = &dif_ctx;
+
+	rc = spdk_dif_ctx_init(&dif_ctx, 520, 8, true, false, SPDK_DIF_DISABLE, 0,
+			       0, 0, 0, 0, 0);
+	CU_ASSERT(rc == 0);
+
+	/* Single iovec case */
+	iovs[0].iov_base = (void *)0xDEADBEEF;
+	iovs[0].iov_len = 2080;
+
+	nvme_tcp_pdu_set_data_buf(&pdu, iovs, 1, 0, 500);
+
+	CU_ASSERT(dif_ctx.data_offset == 0);
+	CU_ASSERT(pdu.data_len == 500);
+	CU_ASSERT(pdu.data_iovcnt == 1);
+	CU_ASSERT(pdu.data_iov[0].iov_base == (void *)0xDEADBEEF);
+	CU_ASSERT(pdu.data_iov[0].iov_len == 500);
+
+	/* 1000 data bytes starting at 500 cross a 512-byte block boundary,
+	 * so the buffer range grows by two 8-byte metadata fields.
+	 */
+	nvme_tcp_pdu_set_data_buf(&pdu, iovs, 1, 500, 1000);
+
+	CU_ASSERT(dif_ctx.data_offset == 500);
+	CU_ASSERT(pdu.data_len == 1000);
+	CU_ASSERT(pdu.data_iovcnt == 1);
+	CU_ASSERT(pdu.data_iov[0].iov_base == (void *)(0xDEADBEEF + 500));
+	CU_ASSERT(pdu.data_iov[0].iov_len == 1016);
+
+	nvme_tcp_pdu_set_data_buf(&pdu, iovs, 1, 1500, 548);
+
+	CU_ASSERT(dif_ctx.data_offset == 1500);
+	CU_ASSERT(pdu.data_len == 548);
+	CU_ASSERT(pdu.data_iovcnt == 1);
+	CU_ASSERT(pdu.data_iov[0].iov_base == (void *)(0xDEADBEEF + 1516));
+	CU_ASSERT(pdu.data_iov[0].iov_len == 564);
+
+	/* Multiple iovecs case */
+	iovs[0].iov_base = (void *)0xDEADBEEF;
+	iovs[0].iov_len = 256;
+	iovs[1].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x1000));
+	iovs[1].iov_len = 256 + 1;
+	iovs[2].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x2000));
+	iovs[2].iov_len = 4;
+	iovs[3].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x3000));
+	iovs[3].iov_len = 3 + 123;
+	iovs[4].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x4000));
+	iovs[4].iov_len = 389 + 6;
+	iovs[5].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x5000));
+	iovs[5].iov_len = 2 + 512 + 8 + 432;
+	iovs[6].iov_base = (void *)((uint8_t *)(0xDEADBEEF + 0x6000));
+	iovs[6].iov_len = 80 + 8;
+
+	nvme_tcp_pdu_set_data_buf(&pdu, iovs, 7, 0, 500);
+
+	CU_ASSERT(dif_ctx.data_offset == 0);
+	CU_ASSERT(pdu.data_len == 500);
+	CU_ASSERT(pdu.data_iovcnt == 2);
+	CU_ASSERT(pdu.data_iov[0].iov_base == (void *)0xDEADBEEF);
+	CU_ASSERT(pdu.data_iov[0].iov_len == 256);
+	CU_ASSERT(pdu.data_iov[1].iov_base == (void *)(0xDEADBEEF + 0x1000));
+	CU_ASSERT(pdu.data_iov[1].iov_len == 244);
+
+	nvme_tcp_pdu_set_data_buf(&pdu, iovs, 7, 500, 1000);
+
+	CU_ASSERT(dif_ctx.data_offset == 500);
+	CU_ASSERT(pdu.data_len == 1000);
+	CU_ASSERT(pdu.data_iovcnt == 5);
+	CU_ASSERT(pdu.data_iov[0].iov_base == (void *)(0xDEADBEEF + 0x1000 + 244));
+	CU_ASSERT(pdu.data_iov[0].iov_len == 13);
+	CU_ASSERT(pdu.data_iov[1].iov_base == (void *)(0xDEADBEEF + 0x2000));
+	CU_ASSERT(pdu.data_iov[1].iov_len == 4);
+	CU_ASSERT(pdu.data_iov[2].iov_base == (void *)(0xDEADBEEF + 0x3000));
+	CU_ASSERT(pdu.data_iov[2].iov_len == 3 + 123);
+	CU_ASSERT(pdu.data_iov[3].iov_base == (void *)(0xDEADBEEF + 0x4000));
+	CU_ASSERT(pdu.data_iov[3].iov_len == 395);
+	CU_ASSERT(pdu.data_iov[4].iov_base == (void *)(0xDEADBEEF + 0x5000));
+	CU_ASSERT(pdu.data_iov[4].iov_len == 478);
+
+	nvme_tcp_pdu_set_data_buf(&pdu, iovs, 7, 1500, 548);
+
+	CU_ASSERT(dif_ctx.data_offset == 1500);
+	CU_ASSERT(pdu.data_len == 548);
+	CU_ASSERT(pdu.data_iovcnt == 2);
+	CU_ASSERT(pdu.data_iov[0].iov_base == (void *)(0xDEADBEEF + 0x5000 + 478));
+	CU_ASSERT(pdu.data_iov[0].iov_len == 476);
+	CU_ASSERT(pdu.data_iov[1].iov_base == (void *)(0xDEADBEEF + 0x6000));
+	CU_ASSERT(pdu.data_iov[1].iov_len == 88);
+}
+
+/* Verify nvme_tcp_build_iovs() with DIF enabled: the single extended buffer
+ * of (512 + 8) * 8 bytes is emitted as eight 512-byte data iovs, skipping
+ * the 8-byte metadata between blocks, followed by the data digest.
+ */
+static void
+test_nvme_tcp_build_iovs_with_md(void)
+{
+	struct nvme_tcp_pdu pdu = {};
+	struct iovec iovs[11] = {};
+	struct spdk_dif_ctx dif_ctx = {};
+	uint32_t mapped_length = 0;
+	int rc;
+
+	rc = spdk_dif_ctx_init(&dif_ctx, 520, 8, true, false, SPDK_DIF_DISABLE, 0,
+			       0, 0, 0, 0, 0);
+	CU_ASSERT(rc == 0);
+
+	pdu.dif_ctx = &dif_ctx;
+
+	pdu.hdr.common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
+	pdu.hdr.common.hlen = sizeof(struct spdk_nvme_tcp_cmd);
+	pdu.hdr.common.plen = pdu.hdr.common.hlen + SPDK_NVME_TCP_DIGEST_LEN + 512 * 8 +
+			      SPDK_NVME_TCP_DIGEST_LEN;
+	pdu.data_len = 512 * 8;
+	pdu.padding_len = 0;
+
+	pdu.data_iov[0].iov_base = (void *)0xDEADBEEF;
+	pdu.data_iov[0].iov_len = (512 + 8) * 8;
+	pdu.data_iovcnt = 1;
+
+	rc = nvme_tcp_build_iovs(iovs, 11, &pdu, true, true, &mapped_length);
+	CU_ASSERT(rc == 10);
+	CU_ASSERT(iovs[0].iov_base == (void *)&pdu.hdr.raw);
+	CU_ASSERT(iovs[0].iov_len == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN);
+	CU_ASSERT(iovs[1].iov_base == (void *)0xDEADBEEF);
+	CU_ASSERT(iovs[1].iov_len == 512);
+	CU_ASSERT(iovs[2].iov_base == (void *)(0xDEADBEEF + 520));
+	CU_ASSERT(iovs[2].iov_len == 512);
+	CU_ASSERT(iovs[3].iov_base == (void *)(0xDEADBEEF + 520 * 2));
+	CU_ASSERT(iovs[3].iov_len == 512);
+	CU_ASSERT(iovs[4].iov_base == (void *)(0xDEADBEEF + 520 * 3));
+	CU_ASSERT(iovs[4].iov_len == 512);
+	CU_ASSERT(iovs[5].iov_base == (void *)(0xDEADBEEF + 520 * 4));
+	CU_ASSERT(iovs[5].iov_len == 512);
+	CU_ASSERT(iovs[6].iov_base == (void *)(0xDEADBEEF + 520 * 5));
+	CU_ASSERT(iovs[6].iov_len == 512);
+	CU_ASSERT(iovs[7].iov_base == (void *)(0xDEADBEEF + 520 * 6));
+	CU_ASSERT(iovs[7].iov_len == 512);
+	CU_ASSERT(iovs[8].iov_base == (void *)(0xDEADBEEF + 520 * 7));
+	CU_ASSERT(iovs[8].iov_len == 512);
+	CU_ASSERT(iovs[9].iov_base == (void *)pdu.data_digest);
+	CU_ASSERT(iovs[9].iov_len == SPDK_NVME_TCP_DIGEST_LEN);
+	CU_ASSERT(mapped_length == sizeof(struct spdk_nvme_tcp_cmd) + SPDK_NVME_TCP_DIGEST_LEN +
+		  512 * 8 + SPDK_NVME_TCP_DIGEST_LEN);
+}
+
+/* Register the five PDU/iov tests and run them; the process exit status is
+ * CUnit's failure count so the harness can detect regressions.
+ */
+int main(int argc, char **argv)
+{
+	CU_pSuite	suite = NULL;
+	unsigned int	num_failures;
+
+	CU_set_error_action(CUEA_ABORT);
+	CU_initialize_registry();
+
+	suite = CU_add_suite("nvme_tcp", NULL, NULL);
+	CU_ADD_TEST(suite, test_nvme_tcp_pdu_set_data_buf);
+	CU_ADD_TEST(suite, test_nvme_tcp_build_iovs);
+	CU_ADD_TEST(suite, test_nvme_tcp_build_sgl_request);
+	CU_ADD_TEST(suite, test_nvme_tcp_pdu_set_data_buf_with_md);
+	CU_ADD_TEST(suite, test_nvme_tcp_build_iovs_with_md);
+
+	CU_basic_set_mode(CU_BRM_VERBOSE);
+	CU_basic_run_tests();
+	num_failures = CU_get_number_of_failures();
+	CU_cleanup_registry();
+	return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_uevent.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_uevent.c/.gitignore
new file mode 100644
index 000000000..1cb0d98ad
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_uevent.c/.gitignore
@@ -0,0 +1 @@
+nvme_uevent_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_uevent.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_uevent.c/Makefile
new file mode 100644
index 000000000..98687efb8
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_uevent.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
# Locate the SPDK tree root relative to this test directory.
SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)

# Single-file unit test; the shared unittest makefile builds it.
TEST_FILE = nvme_uevent_ut.c

include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_uevent.c/nvme_uevent_ut.c b/src/spdk/test/unit/lib/nvme/nvme_uevent.c/nvme_uevent_ut.c
new file mode 100644
index 000000000..a9775c983
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_uevent.c/nvme_uevent_ut.c
@@ -0,0 +1,165 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "spdk/env.h"
+
+#include "common/lib/test_env.c"
+
+#include "nvme/nvme_uevent.c"
+
+#ifdef __linux__
+
/*
 * Symbolic names for the return values of parse_event(), used so the
 * assertions below read as intent rather than bare -1/0/1.
 */
enum uevent_parse_event_return_type {
	uevent_abnormal_exit = -1,
	uevent_normal_exit = 0,
	uevent_expected_continue = 1
};

/* Sentinel written before each case; not a real spdk_uevent subsystem value,
 * so an unchanged field proves parse_event() did not recognize the subsystem. */
#define SPDK_NVME_UEVENT_SUBSYSTEM_NULL 0xFF
+
+static void
+test_nvme_uevent_parse_event(void)
+{
+ char *commands;
+ struct spdk_uevent uevent = {};
+ int rc = uevent_normal_exit;
+
+ /* Simulate commands to check expected behaviors */
+ /* Linux kernel puts null characters after every uevent */
+
+ /* Case 1: Add wrong non-uio or vfio-pci /devices/pci0000:80/0000:80:01.0/0000:81:00.0/uio/uio0 */
+ commands =
+ "ACTION=add\0DEVPATH=/devices/pci0000:80/0000:80:01.0/0000:81:00.0/uio/uio0\0SUBSYSTEM= \0DRIVER= \0PCI_SLOT_NAME= \0";
+ uevent.subsystem = SPDK_NVME_UEVENT_SUBSYSTEM_NULL;
+ uevent.action = 0;
+
+ rc = parse_event(commands, &uevent);
+
+ CU_ASSERT(rc == uevent_abnormal_exit);
+ CU_ASSERT(uevent.subsystem == SPDK_NVME_UEVENT_SUBSYSTEM_NULL);
+ CU_ASSERT(uevent.action == SPDK_NVME_UEVENT_ADD);
+
+ /* Case 2: Add uio /devices/pci0000:80/0000:80:01.0/0000:81:00.0/uio/uio0 */
+ commands =
+ "ACTION=add \0DEVPATH=/devices/pci0000:80/0000:80:01.0/0000:81:00.0/uio/uio0\0SUBSYSTEM=uio\0DRIVER=\0PCI_SLOT_NAME= \0";
+ uevent.subsystem = SPDK_NVME_UEVENT_SUBSYSTEM_NULL;
+ uevent.action = 0;
+
+ rc = parse_event(commands, &uevent);
+
+ CU_ASSERT(rc == uevent_expected_continue);
+ CU_ASSERT(uevent.subsystem == SPDK_NVME_UEVENT_SUBSYSTEM_UIO);
+ CU_ASSERT(uevent.action == SPDK_NVME_UEVENT_ADD);
+
+ /* Case 3: Remove uio /devices/pci0000:80/0000:80:01.0/0000:81:00.0/uio/uio0 */
+ commands =
+ "ACTION=remove\0DEVPATH=/devices/pci0000:80/0000:80:01.0/0000:81:00.0/uio/uio0\0SUBSYSTEM=uio\0DRIVER=\0PCI_SLOT_NAME= \0";
+ uevent.subsystem = SPDK_NVME_UEVENT_SUBSYSTEM_NULL;
+
+ rc = parse_event(commands, &uevent);
+
+ CU_ASSERT(rc == uevent_expected_continue);
+ CU_ASSERT(uevent.subsystem == SPDK_NVME_UEVENT_SUBSYSTEM_UIO);
+ CU_ASSERT(uevent.action == SPDK_NVME_UEVENT_REMOVE);
+
+ /* Case 4: Add vfio-pci 0000:81:00.0 */
+ commands = "ACTION=bind\0DEVPATH=\0SUBSYSTEM= \0DRIVER=vfio-pci\0PCI_SLOT_NAME=0000:81:00.0\0";
+ uevent.subsystem = SPDK_NVME_UEVENT_SUBSYSTEM_NULL;
+
+ rc = parse_event(commands, &uevent);
+
+ CU_ASSERT(rc == uevent_expected_continue);
+ CU_ASSERT(uevent.subsystem == SPDK_NVME_UEVENT_SUBSYSTEM_VFIO);
+ CU_ASSERT(uevent.action == SPDK_NVME_UEVENT_ADD);
+
+ /* Case 5: Remove vfio-pci 0000:81:00.0 */
+ commands = "ACTION=remove\0DEVPATH= \0SUBSYSTEM= \0DRIVER=vfio-pci \0PCI_SLOT_NAME=0000:81:00.0\0";
+ uevent.subsystem = SPDK_NVME_UEVENT_SUBSYSTEM_NULL;
+
+ rc = parse_event(commands, &uevent);
+
+ CU_ASSERT(rc == uevent_expected_continue);
+ CU_ASSERT(uevent.subsystem == SPDK_NVME_UEVENT_SUBSYSTEM_VFIO);
+ CU_ASSERT(uevent.action == SPDK_NVME_UEVENT_REMOVE);
+
+ /* Case 6: Add wrong vfio-pci addr 000000 */
+ commands = "ACTION=bind\0DEVPATH= \0SUBSYSTEM= \0DRIVER=vfio-pci \0PCI_SLOT_NAME=000000\0";
+ uevent.subsystem = SPDK_NVME_UEVENT_SUBSYSTEM_NULL;
+
+ rc = parse_event(commands, &uevent);
+
+ CU_ASSERT(rc == uevent_abnormal_exit);
+ CU_ASSERT(uevent.subsystem == SPDK_NVME_UEVENT_SUBSYSTEM_VFIO);
+ CU_ASSERT(uevent.action == SPDK_NVME_UEVENT_ADD);
+
+ /* Case 7: Add wrong type vfio 0000:81:00.0 */
+ commands = "ACTION=bind\0DEVPATH= \0SUBSYSTEM= \0DRIVER=vfio \0PCI_SLOT_NAME=0000:81:00.0\0";
+ uevent.subsystem = SPDK_NVME_UEVENT_SUBSYSTEM_NULL;
+ uevent.action = 0;
+ rc = parse_event(commands, &uevent);
+
+ CU_ASSERT(rc == uevent_abnormal_exit);
+ CU_ASSERT(uevent.subsystem == SPDK_NVME_UEVENT_SUBSYSTEM_NULL);
+ CU_ASSERT(uevent.action == SPDK_NVME_UEVENT_ADD);
+}
+
+#else
+
/*
 * Non-Linux build: uevents are a Linux-only mechanism, so there is
 * nothing to parse.  Keep a trivially passing test so the suite still
 * registers and runs on every platform.
 */
static void
test_nvme_uevent_parse_event(void)
{
	CU_ASSERT(1);
}
+
+#endif
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvme_uevent", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_nvme_uevent_parse_event);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}