summaryrefslogtreecommitdiffstats
path: root/src/spdk/test/unit/lib/nvme
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-27 18:24:20 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-27 18:24:20 +0000
commit483eb2f56657e8e7f419ab1a4fab8dce9ade8609 (patch)
treee5d88d25d870d5dedacb6bbdbe2a966086a0a5cf /src/spdk/test/unit/lib/nvme
parentInitial commit. (diff)
downloadceph-483eb2f56657e8e7f419ab1a4fab8dce9ade8609.tar.xz
ceph-483eb2f56657e8e7f419ab1a4fab8dce9ade8609.zip
Adding upstream version 14.2.21.upstream/14.2.21upstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/spdk/test/unit/lib/nvme')
-rw-r--r--src/spdk/test/unit/lib/nvme/Makefile47
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme.c/nvme_ut.c1135
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/nvme_ctrlr_ut.c1795
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/nvme_ctrlr_cmd_ut.c645
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/nvme_ctrlr_ocssd_cmd_ut.c116
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ns.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ns.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ns.c/nvme_ns_ut.c163
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/nvme_ns_cmd_ut.c1440
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/nvme_ns_ocssd_cmd_ut.c677
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_pcie.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_pcie.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_pcie.c/nvme_pcie_ut.c861
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_qpair.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_qpair.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_qpair.c/nvme_qpair_ut.c418
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_quirks.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_quirks.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_quirks.c/nvme_quirks_ut.c102
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_rdma.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_rdma.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_rdma.c/nvme_rdma_ut.c298
34 files changed, 8126 insertions, 0 deletions
diff --git a/src/spdk/test/unit/lib/nvme/Makefile b/src/spdk/test/unit/lib/nvme/Makefile
new file mode 100644
index 00000000..fb17a2d0
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/Makefile
@@ -0,0 +1,47 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = nvme.c nvme_ctrlr.c nvme_ctrlr_cmd.c nvme_ctrlr_ocssd_cmd.c nvme_ns.c nvme_ns_cmd.c nvme_ns_ocssd_cmd.c nvme_pcie.c nvme_qpair.c \
+ nvme_quirks.c \
+
+DIRS-$(CONFIG_RDMA) += nvme_rdma.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme.c/.gitignore
new file mode 100644
index 00000000..90c0c167
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme.c/.gitignore
@@ -0,0 +1 @@
+nvme_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme.c/Makefile
new file mode 100644
index 00000000..4202cf54
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme.c/nvme_ut.c b/src/spdk/test/unit/lib/nvme/nvme.c/nvme_ut.c
new file mode 100644
index 00000000..6925a2cf
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme.c/nvme_ut.c
@@ -0,0 +1,1135 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "spdk/env.h"
+
+#include "nvme/nvme.c"
+
+#include "spdk_internal/mock.h"
+
+#include "common/lib/test_env.c"
+
+DEFINE_STUB_V(nvme_ctrlr_fail,
+ (struct spdk_nvme_ctrlr *ctrlr, bool hot_remove))
+
+DEFINE_STUB_V(nvme_ctrlr_proc_get_ref, (struct spdk_nvme_ctrlr *ctrlr))
+
+DEFINE_STUB_V(nvme_ctrlr_proc_put_ref, (struct spdk_nvme_ctrlr *ctrlr))
+
+DEFINE_STUB(spdk_pci_nvme_enumerate, int,
+ (spdk_pci_enum_cb enum_cb, void *enum_ctx), -1)
+
+DEFINE_STUB(spdk_pci_device_get_id, struct spdk_pci_id,
+ (struct spdk_pci_device *pci_dev),
+ MOCK_STRUCT_INIT(.vendor_id = 0xffff, .device_id = 0xffff,
+ .subvendor_id = 0xffff, .subdevice_id = 0xffff))
+
+DEFINE_STUB(spdk_nvme_transport_available, bool,
+ (enum spdk_nvme_transport_type trtype), true)
+
+DEFINE_STUB(nvme_ctrlr_add_process, int,
+ (struct spdk_nvme_ctrlr *ctrlr, void *devhandle), 0)
+
+DEFINE_STUB(nvme_ctrlr_process_init, int,
+ (struct spdk_nvme_ctrlr *ctrlr), 0)
+
+DEFINE_STUB(spdk_pci_device_get_addr, struct spdk_pci_addr,
+ (struct spdk_pci_device *pci_dev), {0})
+
+DEFINE_STUB(nvme_ctrlr_get_ref_count, int,
+ (struct spdk_nvme_ctrlr *ctrlr), 0)
+
+DEFINE_STUB(dummy_probe_cb, bool,
+ (void *cb_ctx, const struct spdk_nvme_transport_id *trid,
+ struct spdk_nvme_ctrlr_opts *opts), false)
+
+DEFINE_STUB(nvme_transport_ctrlr_construct, struct spdk_nvme_ctrlr *,
+ (const struct spdk_nvme_transport_id *trid,
+ const struct spdk_nvme_ctrlr_opts *opts,
+ void *devhandle), NULL)
+
+DEFINE_STUB(spdk_nvme_qpair_process_completions, int32_t,
+ (struct spdk_nvme_qpair *qpair,
+ uint32_t max_completions), 0);
+
+static bool ut_destruct_called = false;
+void
+nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
+{
+ ut_destruct_called = true;
+}
+
+void
+spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
+{
+ memset(opts, 0, sizeof(*opts));
+}
+
+static void
+memset_trid(struct spdk_nvme_transport_id *trid1, struct spdk_nvme_transport_id *trid2)
+{
+ memset(trid1, 0, sizeof(struct spdk_nvme_transport_id));
+ memset(trid2, 0, sizeof(struct spdk_nvme_transport_id));
+}
+
+static bool ut_check_trtype = false;
+int
+nvme_transport_ctrlr_scan(const struct spdk_nvme_transport_id *trid,
+ void *cb_ctx,
+ spdk_nvme_probe_cb probe_cb,
+ spdk_nvme_remove_cb remove_cb,
+ bool direct_connect)
+{
+ struct spdk_nvme_ctrlr *ctrlr = NULL;
+
+ if (ut_check_trtype == true) {
+ CU_ASSERT(trid->trtype == SPDK_NVME_TRANSPORT_PCIE);
+ }
+
+ if (direct_connect == true && probe_cb) {
+ nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);
+ ctrlr = spdk_nvme_get_ctrlr_by_trid(trid);
+ nvme_robust_mutex_lock(&g_spdk_nvme_driver->lock);
+ probe_cb(cb_ctx, trid, &ctrlr->opts);
+ }
+ return 0;
+}
+
+static bool ut_attach_cb_called = false;
+static void
+dummy_attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
+ struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_ctrlr_opts *opts)
+{
+ ut_attach_cb_called = true;
+}
+
+static void
+test_spdk_nvme_probe(void)
+{
+ int rc = 0;
+ const struct spdk_nvme_transport_id *trid = NULL;
+ void *cb_ctx = NULL;
+ spdk_nvme_probe_cb probe_cb = NULL;
+ spdk_nvme_attach_cb attach_cb = dummy_attach_cb;
+ spdk_nvme_remove_cb remove_cb = NULL;
+ struct spdk_nvme_ctrlr ctrlr;
+ pthread_mutexattr_t attr;
+ struct nvme_driver dummy;
+ g_spdk_nvme_driver = &dummy;
+
+ /* driver init fails */
+ MOCK_SET(spdk_process_is_primary, false);
+ MOCK_SET(spdk_memzone_lookup, NULL);
+ rc = spdk_nvme_probe(trid, cb_ctx, probe_cb, attach_cb, remove_cb);
+ CU_ASSERT(rc == -1);
+
+ /*
+ * For secondary processes, the attach_cb should automatically get
+ * called for any controllers already initialized by the primary
+ * process.
+ */
+ MOCK_SET(spdk_nvme_transport_available, false);
+ MOCK_SET(spdk_process_is_primary, true);
+ dummy.initialized = true;
+ g_spdk_nvme_driver = &dummy;
+ rc = spdk_nvme_probe(trid, cb_ctx, probe_cb, attach_cb, remove_cb);
+ CU_ASSERT(rc == -1);
+
+ /* driver init passes, transport available, secondary call attach_cb */
+ MOCK_SET(spdk_nvme_transport_available, true);
+ MOCK_SET(spdk_process_is_primary, false);
+ MOCK_SET(spdk_memzone_lookup, g_spdk_nvme_driver);
+ dummy.initialized = true;
+ memset(&ctrlr, 0, sizeof(struct spdk_nvme_ctrlr));
+ CU_ASSERT(pthread_mutexattr_init(&attr) == 0);
+ CU_ASSERT(pthread_mutex_init(&dummy.lock, &attr) == 0);
+ TAILQ_INIT(&dummy.shared_attached_ctrlrs);
+ TAILQ_INSERT_TAIL(&dummy.shared_attached_ctrlrs, &ctrlr, tailq);
+ ut_attach_cb_called = false;
+	/* setup nvme_transport_ctrlr_scan() stub to also check the trtype */
+ ut_check_trtype = true;
+ rc = spdk_nvme_probe(trid, cb_ctx, probe_cb, attach_cb, remove_cb);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(ut_attach_cb_called == true);
+
+ /* driver init passes, transport available, we are primary */
+ MOCK_SET(spdk_process_is_primary, true);
+ TAILQ_INIT(&g_nvme_init_ctrlrs);
+ rc = spdk_nvme_probe(trid, cb_ctx, probe_cb, attach_cb, remove_cb);
+ CU_ASSERT(rc == 0);
+
+ g_spdk_nvme_driver = NULL;
+ /* reset to pre-test values */
+ MOCK_CLEAR(spdk_memzone_lookup);
+ ut_check_trtype = false;
+
+ pthread_mutex_destroy(&dummy.lock);
+ pthread_mutexattr_destroy(&attr);
+}
+
+static void
+test_spdk_nvme_connect(void)
+{
+ struct spdk_nvme_ctrlr *ret_ctrlr = NULL;
+ struct spdk_nvme_transport_id trid = {};
+ struct spdk_nvme_ctrlr_opts opts = {};
+ struct spdk_nvme_ctrlr ctrlr;
+ pthread_mutexattr_t attr;
+ struct nvme_driver dummy;
+
+ /* initialize the variable to prepare the test */
+ dummy.initialized = true;
+ TAILQ_INIT(&dummy.shared_attached_ctrlrs);
+ g_spdk_nvme_driver = &dummy;
+ CU_ASSERT(pthread_mutexattr_init(&attr) == 0);
+ CU_ASSERT(pthread_mutex_init(&g_spdk_nvme_driver->lock, &attr) == 0);
+
+ /* set NULL trid pointer to test immediate return */
+ ret_ctrlr = spdk_nvme_connect(NULL, NULL, 0);
+ CU_ASSERT(ret_ctrlr == NULL);
+
+ /* driver init passes, transport available, secondary process connects ctrlr */
+ MOCK_SET(spdk_process_is_primary, false);
+ MOCK_SET(spdk_memzone_lookup, g_spdk_nvme_driver);
+ MOCK_SET(spdk_nvme_transport_available, true);
+ memset(&trid, 0, sizeof(trid));
+ trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
+ ret_ctrlr = spdk_nvme_connect(&trid, NULL, 0);
+ CU_ASSERT(ret_ctrlr == NULL);
+
+ /* driver init passes, setup one ctrlr on the attached_list */
+ memset(&ctrlr, 0, sizeof(struct spdk_nvme_ctrlr));
+ snprintf(ctrlr.trid.traddr, sizeof(ctrlr.trid.traddr), "0000:01:00.0");
+ ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_driver->shared_attached_ctrlrs, &ctrlr, tailq);
+ /* get the ctrlr from the attached list */
+ snprintf(trid.traddr, sizeof(trid.traddr), "0000:01:00.0");
+ ret_ctrlr = spdk_nvme_connect(&trid, NULL, 0);
+ CU_ASSERT(ret_ctrlr == &ctrlr);
+ /* get the ctrlr from the attached list with default ctrlr opts */
+ ctrlr.opts.num_io_queues = DEFAULT_MAX_IO_QUEUES;
+ ret_ctrlr = spdk_nvme_connect(&trid, NULL, 0);
+ CU_ASSERT(ret_ctrlr == &ctrlr);
+ CU_ASSERT_EQUAL(ret_ctrlr->opts.num_io_queues, DEFAULT_MAX_IO_QUEUES);
+ /* get the ctrlr from the attached list with default ctrlr opts and consistent opts_size */
+ opts.num_io_queues = 1;
+ ret_ctrlr = spdk_nvme_connect(&trid, &opts, sizeof(opts));
+ CU_ASSERT(ret_ctrlr == &ctrlr);
+ CU_ASSERT_EQUAL(ret_ctrlr->opts.num_io_queues, 1);
+ /* remove the attached ctrlr on the attached_list */
+ CU_ASSERT(spdk_nvme_detach(&ctrlr) == 0);
+ CU_ASSERT(TAILQ_EMPTY(&g_spdk_nvme_driver->shared_attached_ctrlrs));
+
+ /* driver init passes, transport available, primary process connects ctrlr */
+ MOCK_SET(spdk_process_is_primary, true);
+ /* setup one ctrlr on the attached_list */
+ memset(&ctrlr, 0, sizeof(struct spdk_nvme_ctrlr));
+ snprintf(ctrlr.trid.traddr, sizeof(ctrlr.trid.traddr), "0000:02:00.0");
+ ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_driver->shared_attached_ctrlrs, &ctrlr, tailq);
+ /* get the ctrlr from the attached list */
+ snprintf(trid.traddr, sizeof(trid.traddr), "0000:02:00.0");
+ ret_ctrlr = spdk_nvme_connect(&trid, NULL, 0);
+ CU_ASSERT(ret_ctrlr == &ctrlr);
+ /* get the ctrlr from the attached list with default ctrlr opts */
+ ctrlr.opts.num_io_queues = DEFAULT_MAX_IO_QUEUES;
+ ret_ctrlr = spdk_nvme_connect(&trid, NULL, 0);
+ CU_ASSERT(ret_ctrlr == &ctrlr);
+ CU_ASSERT_EQUAL(ret_ctrlr->opts.num_io_queues, DEFAULT_MAX_IO_QUEUES);
+ /* get the ctrlr from the attached list with default ctrlr opts and consistent opts_size */
+ opts.num_io_queues = 2;
+ ret_ctrlr = spdk_nvme_connect(&trid, &opts, sizeof(opts));
+ CU_ASSERT(ret_ctrlr == &ctrlr);
+ CU_ASSERT_EQUAL(ret_ctrlr->opts.num_io_queues, 2);
+ /* remove the attached ctrlr on the attached_list */
+ CU_ASSERT(spdk_nvme_detach(ret_ctrlr) == 0);
+ CU_ASSERT(TAILQ_EMPTY(&g_spdk_nvme_driver->shared_attached_ctrlrs));
+
+ /* test driver init failure return */
+ MOCK_SET(spdk_process_is_primary, false);
+ MOCK_SET(spdk_memzone_lookup, NULL);
+ ret_ctrlr = spdk_nvme_connect(&trid, NULL, 0);
+ CU_ASSERT(ret_ctrlr == NULL);
+}
+
+static void
+test_nvme_init_controllers(void)
+{
+ int rc = 0;
+ struct nvme_driver test_driver;
+ void *cb_ctx = NULL;
+ spdk_nvme_attach_cb attach_cb = dummy_attach_cb;
+ struct spdk_nvme_ctrlr ctrlr;
+ pthread_mutexattr_t attr;
+
+ g_spdk_nvme_driver = &test_driver;
+ memset(&ctrlr, 0, sizeof(struct spdk_nvme_ctrlr));
+ ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
+ CU_ASSERT(pthread_mutexattr_init(&attr) == 0);
+ CU_ASSERT(pthread_mutex_init(&test_driver.lock, &attr) == 0);
+ TAILQ_INIT(&g_nvme_init_ctrlrs);
+ TAILQ_INSERT_TAIL(&g_nvme_init_ctrlrs, &ctrlr, tailq);
+ TAILQ_INIT(&test_driver.shared_attached_ctrlrs);
+
+ /*
+ * Try to initialize, but nvme_ctrlr_process_init will fail.
+ * Verify correct behavior when it does.
+ */
+ MOCK_SET(nvme_ctrlr_process_init, 1);
+ g_spdk_nvme_driver->initialized = false;
+ ut_destruct_called = false;
+ rc = nvme_init_controllers(cb_ctx, attach_cb);
+ CU_ASSERT(rc == -1);
+ CU_ASSERT(g_spdk_nvme_driver->initialized == true);
+ CU_ASSERT(TAILQ_EMPTY(&g_nvme_init_ctrlrs));
+ CU_ASSERT(ut_destruct_called == true);
+
+ /*
+ * Controller init OK, need to move the controller state machine
+	 * forward by setting the ctrlr state so that it can be moved
+	 * to the shared_attached_ctrlrs list.
+ */
+ TAILQ_INSERT_TAIL(&g_nvme_init_ctrlrs, &ctrlr, tailq);
+ ctrlr.state = NVME_CTRLR_STATE_READY;
+ MOCK_SET(nvme_ctrlr_process_init, 0);
+ rc = nvme_init_controllers(cb_ctx, attach_cb);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(ut_attach_cb_called == true);
+ CU_ASSERT(TAILQ_EMPTY(&g_nvme_init_ctrlrs));
+ CU_ASSERT(TAILQ_EMPTY(&g_nvme_attached_ctrlrs));
+ CU_ASSERT(TAILQ_FIRST(&g_spdk_nvme_driver->shared_attached_ctrlrs) == &ctrlr);
+ TAILQ_REMOVE(&g_spdk_nvme_driver->shared_attached_ctrlrs, &ctrlr, tailq);
+
+ /*
+ * Non-PCIe controllers should be added to the per-process list, not the shared list.
+ */
+ memset(&ctrlr, 0, sizeof(struct spdk_nvme_ctrlr));
+ ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
+ TAILQ_INSERT_TAIL(&g_nvme_init_ctrlrs, &ctrlr, tailq);
+ ctrlr.state = NVME_CTRLR_STATE_READY;
+ MOCK_SET(nvme_ctrlr_process_init, 0);
+ rc = nvme_init_controllers(cb_ctx, attach_cb);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(ut_attach_cb_called == true);
+ CU_ASSERT(TAILQ_EMPTY(&g_nvme_init_ctrlrs));
+ CU_ASSERT(TAILQ_EMPTY(&g_spdk_nvme_driver->shared_attached_ctrlrs));
+ CU_ASSERT(TAILQ_FIRST(&g_nvme_attached_ctrlrs) == &ctrlr);
+ TAILQ_REMOVE(&g_nvme_attached_ctrlrs, &ctrlr, tailq);
+
+ g_spdk_nvme_driver = NULL;
+ pthread_mutexattr_destroy(&attr);
+ pthread_mutex_destroy(&test_driver.lock);
+}
+
+static void
+test_nvme_driver_init(void)
+{
+ int rc;
+ struct nvme_driver dummy;
+ g_spdk_nvme_driver = &dummy;
+
+ /* adjust this so testing doesn't take so long */
+ g_nvme_driver_timeout_ms = 100;
+
+ /* process is primary and mem already reserved */
+ MOCK_SET(spdk_process_is_primary, true);
+ dummy.initialized = true;
+ rc = nvme_driver_init();
+ CU_ASSERT(rc == 0);
+
+ /*
+ * Process is primary and mem not yet reserved but the call
+ * to spdk_memzone_reserve() returns NULL.
+ */
+ g_spdk_nvme_driver = NULL;
+ MOCK_SET(spdk_process_is_primary, true);
+ MOCK_SET(spdk_memzone_reserve, NULL);
+ rc = nvme_driver_init();
+ CU_ASSERT(rc == -1);
+
+ /* process is not primary, no mem already reserved */
+ MOCK_SET(spdk_process_is_primary, false);
+ MOCK_SET(spdk_memzone_lookup, NULL);
+ g_spdk_nvme_driver = NULL;
+ rc = nvme_driver_init();
+ CU_ASSERT(rc == -1);
+
+ /* process is not primary, mem is already reserved & init'd */
+ MOCK_SET(spdk_process_is_primary, false);
+ MOCK_SET(spdk_memzone_lookup, (void *)&dummy);
+ dummy.initialized = true;
+ rc = nvme_driver_init();
+ CU_ASSERT(rc == 0);
+
+ /* process is not primary, mem is reserved but not initialized */
+ /* and times out */
+ MOCK_SET(spdk_process_is_primary, false);
+ MOCK_SET(spdk_memzone_reserve, (void *)&dummy);
+ dummy.initialized = false;
+ rc = nvme_driver_init();
+ CU_ASSERT(rc == -1);
+
+ /* process is primary, got mem but mutex won't init */
+ MOCK_SET(spdk_process_is_primary, true);
+ MOCK_SET(spdk_memzone_reserve, (void *)&dummy);
+ MOCK_SET(pthread_mutexattr_init, -1);
+ g_spdk_nvme_driver = NULL;
+ dummy.initialized = true;
+ rc = nvme_driver_init();
+	/* for FreeBSD we can't effectively mock this path */
+#ifndef __FreeBSD__
+ CU_ASSERT(rc != 0);
+#else
+ CU_ASSERT(rc == 0);
+#endif
+
+ /* process is primary, got mem, mutex OK */
+ MOCK_SET(spdk_process_is_primary, true);
+ MOCK_CLEAR(pthread_mutexattr_init);
+ g_spdk_nvme_driver = NULL;
+ rc = nvme_driver_init();
+ CU_ASSERT(g_spdk_nvme_driver->initialized == false);
+ CU_ASSERT(TAILQ_EMPTY(&g_nvme_init_ctrlrs));
+ CU_ASSERT(TAILQ_EMPTY(&g_spdk_nvme_driver->shared_attached_ctrlrs));
+ CU_ASSERT(rc == 0);
+
+ g_spdk_nvme_driver = NULL;
+ MOCK_CLEAR(spdk_memzone_reserve);
+ MOCK_CLEAR(spdk_memzone_lookup);
+}
+
+static void
+test_spdk_nvme_detach(void)
+{
+ int rc = 1;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_ctrlr *ret_ctrlr;
+ struct nvme_driver test_driver;
+
+ memset(&ctrlr, 0, sizeof(ctrlr));
+ ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
+
+ g_spdk_nvme_driver = &test_driver;
+ TAILQ_INIT(&test_driver.shared_attached_ctrlrs);
+ TAILQ_INSERT_TAIL(&test_driver.shared_attached_ctrlrs, &ctrlr, tailq);
+ CU_ASSERT(pthread_mutex_init(&test_driver.lock, NULL) == 0);
+
+ /*
+ * Controllers are ref counted so mock the function that returns
+ * the ref count so that detach will actually call the destruct
+ * function which we've mocked simply to verify that it gets
+ * called (we aren't testing what the real destruct function does
+ * here.)
+ */
+ MOCK_SET(nvme_ctrlr_get_ref_count, 0);
+ rc = spdk_nvme_detach(&ctrlr);
+ ret_ctrlr = TAILQ_FIRST(&test_driver.shared_attached_ctrlrs);
+ CU_ASSERT(ret_ctrlr == NULL);
+ CU_ASSERT(ut_destruct_called == true);
+ CU_ASSERT(rc == 0);
+
+ /*
+ * Mock the ref count to 1 so we confirm that the destruct
+ * function is not called and that attached ctrl list is
+ * not empty.
+ */
+ MOCK_SET(nvme_ctrlr_get_ref_count, 1);
+ TAILQ_INSERT_TAIL(&test_driver.shared_attached_ctrlrs, &ctrlr, tailq);
+ ut_destruct_called = false;
+ rc = spdk_nvme_detach(&ctrlr);
+ ret_ctrlr = TAILQ_FIRST(&test_driver.shared_attached_ctrlrs);
+ CU_ASSERT(ret_ctrlr != NULL);
+ CU_ASSERT(ut_destruct_called == false);
+ CU_ASSERT(rc == 0);
+
+ /*
+ * Non-PCIe controllers should be on the per-process attached_ctrlrs list, not the
+ * shared_attached_ctrlrs list. Test an RDMA controller and ensure it is removed
+ * from the correct list.
+ */
+ memset(&ctrlr, 0, sizeof(ctrlr));
+ ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
+ TAILQ_INIT(&g_nvme_attached_ctrlrs);
+ TAILQ_INSERT_TAIL(&g_nvme_attached_ctrlrs, &ctrlr, tailq);
+ MOCK_SET(nvme_ctrlr_get_ref_count, 0);
+ rc = spdk_nvme_detach(&ctrlr);
+ CU_ASSERT(TAILQ_EMPTY(&g_nvme_attached_ctrlrs));
+ CU_ASSERT(ut_destruct_called == true);
+ CU_ASSERT(rc == 0);
+
+ g_spdk_nvme_driver = NULL;
+ pthread_mutex_destroy(&test_driver.lock);
+}
+
+static void
+test_nvme_completion_poll_cb(void)
+{
+ struct nvme_completion_poll_status status;
+ struct spdk_nvme_cpl cpl;
+
+ memset(&status, 0x0, sizeof(status));
+ memset(&cpl, 0xff, sizeof(cpl));
+
+ nvme_completion_poll_cb(&status, &cpl);
+ CU_ASSERT(status.done == true);
+ CU_ASSERT(memcmp(&cpl, &status.cpl,
+ sizeof(struct spdk_nvme_cpl)) == 0);
+}
+
+/* stub callback used by test_nvme_user_copy_cmd_complete() */
+static struct spdk_nvme_cpl ut_spdk_nvme_cpl = {0};
+static void
+dummy_cb(void *user_cb_arg, struct spdk_nvme_cpl *cpl)
+{
+ ut_spdk_nvme_cpl = *cpl;
+}
+
+static void
+test_nvme_user_copy_cmd_complete(void)
+{
+ struct nvme_request req;
+ int test_data = 0xdeadbeef;
+ int buff_size = sizeof(int);
+ void *buff;
+ static struct spdk_nvme_cpl cpl;
+
+ memset(&req, 0, sizeof(req));
+ memset(&cpl, 0x5a, sizeof(cpl));
+
+ /* test without a user buffer provided */
+ req.user_cb_fn = (void *)dummy_cb;
+ nvme_user_copy_cmd_complete(&req, &cpl);
+ CU_ASSERT(memcmp(&ut_spdk_nvme_cpl, &cpl, sizeof(cpl)) == 0);
+
+ /* test with a user buffer provided */
+ req.user_buffer = malloc(buff_size);
+ SPDK_CU_ASSERT_FATAL(req.user_buffer != NULL);
+ memset(req.user_buffer, 0, buff_size);
+ req.payload_size = buff_size;
+ buff = spdk_dma_zmalloc(buff_size, 0x100, NULL);
+ SPDK_CU_ASSERT_FATAL(buff != NULL);
+ req.payload = NVME_PAYLOAD_CONTIG(buff, NULL);
+ memcpy(buff, &test_data, buff_size);
+ req.cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
+ req.pid = getpid();
+
+ /* zero out the test value set in the callback */
+ memset(&ut_spdk_nvme_cpl, 0, sizeof(ut_spdk_nvme_cpl));
+
+ nvme_user_copy_cmd_complete(&req, &cpl);
+ CU_ASSERT(memcmp(req.user_buffer, &test_data, buff_size) == 0);
+ CU_ASSERT(memcmp(&ut_spdk_nvme_cpl, &cpl, sizeof(cpl)) == 0);
+
+ /*
+ * Now test the same path as above but this time choose an opc
+ * that results in a different data transfer type.
+ */
+ memset(&ut_spdk_nvme_cpl, 0, sizeof(ut_spdk_nvme_cpl));
+ memset(req.user_buffer, 0, buff_size);
+ buff = spdk_dma_zmalloc(buff_size, 0x100, NULL);
+ SPDK_CU_ASSERT_FATAL(buff != NULL);
+ req.payload = NVME_PAYLOAD_CONTIG(buff, NULL);
+ memcpy(buff, &test_data, buff_size);
+ req.cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
+ nvme_user_copy_cmd_complete(&req, &cpl);
+ CU_ASSERT(memcmp(req.user_buffer, &test_data, buff_size) != 0);
+ CU_ASSERT(memcmp(&ut_spdk_nvme_cpl, &cpl, sizeof(cpl)) == 0);
+
+ /* clean up */
+ free(req.user_buffer);
+}
+
+static void
+test_nvme_allocate_request_null(void)
+{
+ struct spdk_nvme_qpair qpair;
+ spdk_nvme_cmd_cb cb_fn = (spdk_nvme_cmd_cb)0x1234;
+ void *cb_arg = (void *)0x5678;
+ struct nvme_request *req = NULL;
+ struct nvme_request dummy_req;
+
+ STAILQ_INIT(&qpair.free_req);
+ STAILQ_INIT(&qpair.queued_req);
+
+ /*
+ * Put a dummy on the queue so we can make a request
+ * and confirm that what comes back is what we expect.
+ */
+ STAILQ_INSERT_HEAD(&qpair.free_req, &dummy_req, stailq);
+
+ req = nvme_allocate_request_null(&qpair, cb_fn, cb_arg);
+
+ /*
+	 * Compare the req with the parameters that we passed in
+ * as well as what the function is supposed to update.
+ */
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+ CU_ASSERT(req->cb_fn == cb_fn);
+ CU_ASSERT(req->cb_arg == cb_arg);
+ CU_ASSERT(req->pid == getpid());
+ CU_ASSERT(nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_CONTIG);
+ CU_ASSERT(req->payload.md == NULL);
+ CU_ASSERT(req->payload.contig_or_cb_arg == NULL);
+}
+
+static void
+test_nvme_allocate_request(void)
+{
+ struct spdk_nvme_qpair qpair;
+ struct nvme_payload payload;
+ uint32_t payload_struct_size = sizeof(payload);
+ spdk_nvme_cmd_cb cb_fn = (spdk_nvme_cmd_cb)0x1234;
+ void *cb_arg = (void *)0x6789;
+ struct nvme_request *req = NULL;
+ struct nvme_request dummy_req;
+
+ /* Fill the whole payload struct with a known pattern */
+ memset(&payload, 0x5a, payload_struct_size);
+ STAILQ_INIT(&qpair.free_req);
+ STAILQ_INIT(&qpair.queued_req);
+
+ /* Test trying to allocate a request when no requests are available */
+ req = nvme_allocate_request(&qpair, &payload, payload_struct_size,
+ cb_fn, cb_arg);
+ CU_ASSERT(req == NULL);
+
+ /* put a dummy on the queue, and then allocate one */
+ STAILQ_INSERT_HEAD(&qpair.free_req, &dummy_req, stailq);
+ req = nvme_allocate_request(&qpair, &payload, payload_struct_size,
+ cb_fn, cb_arg);
+
+	/* all the req elements should now match the passed in parameters */
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+ CU_ASSERT(req->cb_fn == cb_fn);
+ CU_ASSERT(req->cb_arg == cb_arg);
+ CU_ASSERT(memcmp(&req->payload, &payload, payload_struct_size) == 0);
+ CU_ASSERT(req->payload_size == payload_struct_size);
+ CU_ASSERT(req->qpair == &qpair);
+ CU_ASSERT(req->pid == getpid());
+}
+
+static void
+test_nvme_free_request(void)
+{
+ struct nvme_request match_req;
+ struct spdk_nvme_qpair qpair;
+ struct nvme_request *req;
+
+ /* put a req on the Q, take it off and compare */
+ memset(&match_req.cmd, 0x5a, sizeof(struct spdk_nvme_cmd));
+ match_req.qpair = &qpair;
+ /* the code under tests asserts this condition */
+ match_req.num_children = 0;
+ STAILQ_INIT(&qpair.free_req);
+
+ nvme_free_request(&match_req);
+ req = STAILQ_FIRST(&match_req.qpair->free_req);
+ CU_ASSERT(req == &match_req);
+}
+
/*
 * Exercise nvme_allocate_request_user_copy():
 *  - NULL buffer with nonzero size must yield NULL,
 *  - host-to-controller requests copy the user buffer into the DMA payload,
 *  - controller-to-host requests leave the DMA payload uncopied,
 *  - a failed DMA allocation must yield NULL.
 */
static void
test_nvme_allocate_request_user_copy(void)
{
	struct spdk_nvme_qpair qpair;
	spdk_nvme_cmd_cb cb_fn = (spdk_nvme_cmd_cb)0x12345;
	void *cb_arg = (void *)0x12345;
	bool host_to_controller = true;
	struct nvme_request *req;
	struct nvme_request dummy_req;	/* preloaded onto free_req so allocation can succeed */
	int test_data = 0xdeadbeef;
	void *buffer = NULL;
	uint32_t payload_size = sizeof(int);

	STAILQ_INIT(&qpair.free_req);
	STAILQ_INIT(&qpair.queued_req);

	/* no buffer or valid payload size, early NULL return */
	req = nvme_allocate_request_user_copy(&qpair, buffer, payload_size, cb_fn,
					      cb_arg, host_to_controller);
	CU_ASSERT(req == NULL);

	/* good buffer and valid payload size */
	buffer = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(buffer != NULL);
	memcpy(buffer, &test_data, payload_size);

	/* put a dummy on the queue */
	STAILQ_INSERT_HEAD(&qpair.free_req, &dummy_req, stailq);

	/* NOTE(review): no trailing ';' on these two MOCK_CLEAR uses —
	 * presumably the macro expands to a full statement; confirm against mock.h. */
	MOCK_CLEAR(spdk_malloc)
	MOCK_CLEAR(spdk_zmalloc)
	req = nvme_allocate_request_user_copy(&qpair, buffer, payload_size, cb_fn,
					      cb_arg, host_to_controller);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	CU_ASSERT(req->user_cb_fn == cb_fn);
	CU_ASSERT(req->user_cb_arg == cb_arg);
	CU_ASSERT(req->user_buffer == buffer);
	CU_ASSERT(req->cb_arg == req);
	/* host-to-controller: user data was copied into the DMA buffer */
	CU_ASSERT(memcmp(req->payload.contig_or_cb_arg, buffer, payload_size) == 0);
	spdk_dma_free(req->payload.contig_or_cb_arg);

	/* same thing but additional path coverage, no copy */
	host_to_controller = false;
	STAILQ_INSERT_HEAD(&qpair.free_req, &dummy_req, stailq);

	req = nvme_allocate_request_user_copy(&qpair, buffer, payload_size, cb_fn,
					      cb_arg, host_to_controller);
	SPDK_CU_ASSERT_FATAL(req != NULL);
	CU_ASSERT(req->user_cb_fn == cb_fn);
	CU_ASSERT(req->user_cb_arg == cb_arg);
	CU_ASSERT(req->user_buffer == buffer);
	CU_ASSERT(req->cb_arg == req);
	/* controller-to-host: DMA buffer must NOT contain the user data yet */
	CU_ASSERT(memcmp(req->payload.contig_or_cb_arg, buffer, payload_size) != 0);
	spdk_dma_free(req->payload.contig_or_cb_arg);

	/* good buffer and valid payload size but make spdk_dma_zmalloc fail */
	/* set the mock pointer to NULL for spdk_dma_zmalloc */
	MOCK_SET(spdk_dma_zmalloc, NULL);
	req = nvme_allocate_request_user_copy(&qpair, buffer, payload_size, cb_fn,
					      cb_arg, host_to_controller);
	CU_ASSERT(req == NULL);
	free(buffer);
	MOCK_CLEAR(spdk_dma_zmalloc);
}
+
+static void
+test_nvme_ctrlr_probe(void)
+{
+ int rc = 0;
+ struct spdk_nvme_ctrlr ctrlr = {};
+ const struct spdk_nvme_transport_id trid = {};
+ void *devhandle = NULL;
+ void *cb_ctx = NULL;
+ struct spdk_nvme_ctrlr *dummy = NULL;
+
+ /* test when probe_cb returns false */
+ MOCK_SET(dummy_probe_cb, false);
+ rc = nvme_ctrlr_probe(&trid, devhandle, dummy_probe_cb, cb_ctx);
+ CU_ASSERT(rc == 1);
+
+ /* probe_cb returns true but we can't construct a ctrl */
+ MOCK_SET(dummy_probe_cb, true);
+ MOCK_SET(nvme_transport_ctrlr_construct, NULL);
+ rc = nvme_ctrlr_probe(&trid, devhandle, dummy_probe_cb, cb_ctx);
+ CU_ASSERT(rc == -1);
+
+ /* happy path */
+ g_spdk_nvme_driver = malloc(sizeof(struct nvme_driver));
+ SPDK_CU_ASSERT_FATAL(g_spdk_nvme_driver != NULL);
+ MOCK_SET(dummy_probe_cb, true);
+ MOCK_SET(nvme_transport_ctrlr_construct, &ctrlr);
+ TAILQ_INIT(&g_nvme_init_ctrlrs);
+ rc = nvme_ctrlr_probe(&trid, devhandle, dummy_probe_cb, cb_ctx);
+ CU_ASSERT(rc == 0);
+ dummy = TAILQ_FIRST(&g_nvme_init_ctrlrs);
+ CU_ASSERT(dummy == ut_nvme_transport_ctrlr_construct);
+ TAILQ_REMOVE(&g_nvme_init_ctrlrs, dummy, tailq);
+ MOCK_CLEAR_P(nvme_transport_ctrlr_construct);
+
+ free(g_spdk_nvme_driver);
+}
+
/*
 * Exercise nvme_robust_mutex_init_shared() with both mocked pthread calls
 * succeeding, then with each one failing in turn.  On FreeBSD the function
 * is documented (below) to always return 0, so the failure expectations
 * are inverted there.
 */
static void
test_nvme_robust_mutex_init_shared(void)
{
	pthread_mutex_t mtx;
	int rc = 0;

	/* test where both pthread calls succeed */
	MOCK_SET(pthread_mutexattr_init, 0);
	MOCK_SET(pthread_mutex_init, 0);
	rc = nvme_robust_mutex_init_shared(&mtx);
	CU_ASSERT(rc == 0);

	/* test where we can't init attr's but init mutex works */
	MOCK_SET(pthread_mutexattr_init, -1);
	MOCK_SET(pthread_mutex_init, 0);
	rc = nvme_robust_mutex_init_shared(&mtx);
	/* for FreeBSD the only possible return value is 0 */
#ifndef __FreeBSD__
	CU_ASSERT(rc != 0);
#else
	CU_ASSERT(rc == 0);
#endif

	/* test where we can init attr's but the mutex init fails */
	MOCK_SET(pthread_mutexattr_init, 0);
	MOCK_SET(pthread_mutex_init, -1);
	rc = nvme_robust_mutex_init_shared(&mtx);
	/* for FreeBSD the only possible return value is 0 */
#ifndef __FreeBSD__
	CU_ASSERT(rc != 0);
#else
	CU_ASSERT(rc == 0);
#endif
}
+
+static void
+test_opc_data_transfer(void)
+{
+ enum spdk_nvme_data_transfer xfer;
+
+ xfer = spdk_nvme_opc_get_data_transfer(SPDK_NVME_OPC_FLUSH);
+ CU_ASSERT(xfer == SPDK_NVME_DATA_NONE);
+
+ xfer = spdk_nvme_opc_get_data_transfer(SPDK_NVME_OPC_WRITE);
+ CU_ASSERT(xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER);
+
+ xfer = spdk_nvme_opc_get_data_transfer(SPDK_NVME_OPC_READ);
+ CU_ASSERT(xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST);
+
+ xfer = spdk_nvme_opc_get_data_transfer(SPDK_NVME_OPC_GET_LOG_PAGE);
+ CU_ASSERT(xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST);
+}
+
/*
 * Exercise spdk_nvme_transport_id_parse() on NULL/garbled inputs and on
 * well-formed RDMA and PCIe strings, then exercise
 * spdk_nvme_transport_id_compare() field by field (adrfam, traddr,
 * trsvcid, subnqn) and for PCIe addresses in differing notations.
 */
static void
test_trid_parse_and_compare(void)
{
	struct spdk_nvme_transport_id trid1, trid2;
	int ret;

	/* set trid1 trid2 value to id parse */
	/* NULL destination and/or NULL string must be rejected */
	ret = spdk_nvme_transport_id_parse(NULL, "trtype:PCIe traddr:0000:04:00.0");
	CU_ASSERT(ret == -EINVAL);
	memset(&trid1, 0, sizeof(trid1));
	ret = spdk_nvme_transport_id_parse(&trid1, NULL);
	CU_ASSERT(ret == -EINVAL);
	ret = spdk_nvme_transport_id_parse(NULL, NULL);
	CU_ASSERT(ret == -EINVAL);
	/* malformed key/value separators must be rejected */
	memset(&trid1, 0, sizeof(trid1));
	ret = spdk_nvme_transport_id_parse(&trid1, "trtype-PCIe traddr-0000-04-00.0");
	CU_ASSERT(ret == -EINVAL);
	memset(&trid1, 0, sizeof(trid1));
	ret = spdk_nvme_transport_id_parse(&trid1, "trtype-PCIe traddr-0000-04-00.0-:");
	CU_ASSERT(ret == -EINVAL);
	memset(&trid1, 0, sizeof(trid1));
	ret = spdk_nvme_transport_id_parse(&trid1, " \t\n:");
	CU_ASSERT(ret == -EINVAL);
	/* a complete RDMA trid, newline-separated keys, parses into all fields */
	memset(&trid1, 0, sizeof(trid1));
	CU_ASSERT(spdk_nvme_transport_id_parse(&trid1,
					       "trtype:rdma\n"
					       "adrfam:ipv4\n"
					       "traddr:192.168.100.8\n"
					       "trsvcid:4420\n"
					       "subnqn:nqn.2014-08.org.nvmexpress.discovery") == 0);
	CU_ASSERT(trid1.trtype == SPDK_NVME_TRANSPORT_RDMA);
	CU_ASSERT(trid1.adrfam == SPDK_NVMF_ADRFAM_IPV4);
	CU_ASSERT(strcmp(trid1.traddr, "192.168.100.8") == 0);
	CU_ASSERT(strcmp(trid1.trsvcid, "4420") == 0);
	CU_ASSERT(strcmp(trid1.subnqn, "nqn.2014-08.org.nvmexpress.discovery") == 0);

	/* a minimal PCIe trid parses */
	memset(&trid2, 0, sizeof(trid2));
	CU_ASSERT(spdk_nvme_transport_id_parse(&trid2, "trtype:PCIe traddr:0000:04:00.0") == 0);
	CU_ASSERT(trid2.trtype == SPDK_NVME_TRANSPORT_PCIE);
	CU_ASSERT(strcmp(trid2.traddr, "0000:04:00.0") == 0);

	/* an RDMA trid and a PCIe trid must compare unequal */
	CU_ASSERT(spdk_nvme_transport_id_compare(&trid1, &trid2) != 0);

	/* set trid1 trid2 and test id_compare */
	memset_trid(&trid1, &trid2);
	trid1.adrfam = SPDK_NVMF_ADRFAM_IPV6;
	trid2.adrfam = SPDK_NVMF_ADRFAM_IPV4;
	ret = spdk_nvme_transport_id_compare(&trid1, &trid2);
	CU_ASSERT(ret > 0);

	memset_trid(&trid1, &trid2);
	snprintf(trid1.traddr, sizeof(trid1.traddr), "192.168.100.8");
	snprintf(trid2.traddr, sizeof(trid2.traddr), "192.168.100.9");
	ret = spdk_nvme_transport_id_compare(&trid1, &trid2);
	CU_ASSERT(ret < 0);

	memset_trid(&trid1, &trid2);
	snprintf(trid1.trsvcid, sizeof(trid1.trsvcid), "4420");
	snprintf(trid2.trsvcid, sizeof(trid2.trsvcid), "4421");
	ret = spdk_nvme_transport_id_compare(&trid1, &trid2);
	CU_ASSERT(ret < 0);

	memset_trid(&trid1, &trid2);
	snprintf(trid1.subnqn, sizeof(trid1.subnqn), "subnqn:nqn.2016-08.org.nvmexpress.discovery");
	snprintf(trid2.subnqn, sizeof(trid2.subnqn), "subnqn:nqn.2017-08.org.nvmexpress.discovery");
	ret = spdk_nvme_transport_id_compare(&trid1, &trid2);
	CU_ASSERT(ret < 0);

	memset_trid(&trid1, &trid2);
	snprintf(trid1.subnqn, sizeof(trid1.subnqn), "subnqn:nqn.2016-08.org.nvmexpress.discovery");
	snprintf(trid2.subnqn, sizeof(trid2.subnqn), "subnqn:nqn.2016-08.org.nvmexpress.discovery");
	ret = spdk_nvme_transport_id_compare(&trid1, &trid2);
	CU_ASSERT(ret == 0);

	/* subnqn comparison is case-sensitive: 'n' vs 'N' compares greater */
	memset_trid(&trid1, &trid2);
	snprintf(trid1.subnqn, sizeof(trid1.subnqn), "subnqn:nqn.2016-08.org.nvmexpress.discovery");
	snprintf(trid2.subnqn, sizeof(trid2.subnqn), "subnqn:nqn.2016-08.org.Nvmexpress.discovery");
	ret = spdk_nvme_transport_id_compare(&trid1, &trid2);
	CU_ASSERT(ret > 0);

	memset_trid(&trid1, &trid2);
	ret = spdk_nvme_transport_id_compare(&trid1, &trid2);
	CU_ASSERT(ret == 0);

	/* Compare PCI addresses via spdk_pci_addr_compare (rather than as strings) */
	memset_trid(&trid1, &trid2);
	CU_ASSERT(spdk_nvme_transport_id_parse(&trid1, "trtype:PCIe traddr:0000:04:00.0") == 0);
	CU_ASSERT(spdk_nvme_transport_id_parse(&trid2, "trtype:PCIe traddr:04:00.0") == 0);
	CU_ASSERT(spdk_nvme_transport_id_compare(&trid1, &trid2) == 0);

	memset_trid(&trid1, &trid2);
	CU_ASSERT(spdk_nvme_transport_id_parse(&trid1, "trtype:PCIe traddr:0000:05:00.0") == 0);
	CU_ASSERT(spdk_nvme_transport_id_parse(&trid2, "trtype:PCIe traddr:04:00.0") == 0);
	CU_ASSERT(spdk_nvme_transport_id_compare(&trid1, &trid2) > 0);

	memset_trid(&trid1, &trid2);
	CU_ASSERT(spdk_nvme_transport_id_parse(&trid1, "trtype:PCIe traddr:0000:04:00.0") == 0);
	CU_ASSERT(spdk_nvme_transport_id_parse(&trid2, "trtype:PCIe traddr:05:00.0") == 0);
	CU_ASSERT(spdk_nvme_transport_id_compare(&trid1, &trid2) < 0);

	/* '=' is accepted as the key/value separator as well as ':' */
	memset_trid(&trid1, &trid2);
	CU_ASSERT(spdk_nvme_transport_id_parse(&trid1, "trtype=PCIe traddr=0000:04:00.0") == 0);
	CU_ASSERT(spdk_nvme_transport_id_parse(&trid2, "trtype=PCIe traddr=05:00.0") == 0);
	CU_ASSERT(spdk_nvme_transport_id_compare(&trid1, &trid2) < 0);
}
+
+static void
+test_spdk_nvme_transport_id_parse_trtype(void)
+{
+
+ enum spdk_nvme_transport_type *trtype;
+ enum spdk_nvme_transport_type sct;
+ char *str;
+
+ trtype = NULL;
+ str = "unit_test";
+
+ /* test function returned value when trtype is NULL but str not NULL */
+ CU_ASSERT(spdk_nvme_transport_id_parse_trtype(trtype, str) == (-EINVAL));
+
+ /* test function returned value when str is NULL but trtype not NULL */
+ trtype = &sct;
+ str = NULL;
+ CU_ASSERT(spdk_nvme_transport_id_parse_trtype(trtype, str) == (-EINVAL));
+
+ /* test function returned value when str and strtype not NULL, but str value
+ * not "PCIe" or "RDMA" */
+ str = "unit_test";
+ CU_ASSERT(spdk_nvme_transport_id_parse_trtype(trtype, str) == (-ENOENT));
+
+ /* test trtype value when use function "strcasecmp" to compare str and "PCIe",not case-sensitive */
+ str = "PCIe";
+ spdk_nvme_transport_id_parse_trtype(trtype, str);
+ CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_PCIE);
+
+ str = "pciE";
+ spdk_nvme_transport_id_parse_trtype(trtype, str);
+ CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_PCIE);
+
+ /* test trtype value when use function "strcasecmp" to compare str and "RDMA",not case-sensitive */
+ str = "RDMA";
+ spdk_nvme_transport_id_parse_trtype(trtype, str);
+ CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_RDMA);
+
+ str = "rdma";
+ spdk_nvme_transport_id_parse_trtype(trtype, str);
+ CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_RDMA);
+
+}
+
+static void
+test_spdk_nvme_transport_id_parse_adrfam(void)
+{
+
+ enum spdk_nvmf_adrfam *adrfam;
+ enum spdk_nvmf_adrfam sct;
+ char *str;
+
+ adrfam = NULL;
+ str = "unit_test";
+
+ /* test function returned value when adrfam is NULL but str not NULL */
+ CU_ASSERT(spdk_nvme_transport_id_parse_adrfam(adrfam, str) == (-EINVAL));
+
+ /* test function returned value when str is NULL but adrfam not NULL */
+ adrfam = &sct;
+ str = NULL;
+ CU_ASSERT(spdk_nvme_transport_id_parse_adrfam(adrfam, str) == (-EINVAL));
+
+ /* test function returned value when str and adrfam not NULL, but str value
+ * not "IPv4" or "IPv6" or "IB" or "FC" */
+ str = "unit_test";
+ CU_ASSERT(spdk_nvme_transport_id_parse_adrfam(adrfam, str) == (-ENOENT));
+
+ /* test adrfam value when use function "strcasecmp" to compare str and "IPv4",not case-sensitive */
+ str = "IPv4";
+ spdk_nvme_transport_id_parse_adrfam(adrfam, str);
+ CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_IPV4);
+
+ str = "ipV4";
+ spdk_nvme_transport_id_parse_adrfam(adrfam, str);
+ CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_IPV4);
+
+ /* test adrfam value when use function "strcasecmp" to compare str and "IPv6",not case-sensitive */
+ str = "IPv6";
+ spdk_nvme_transport_id_parse_adrfam(adrfam, str);
+ CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_IPV6);
+
+ str = "ipV6";
+ spdk_nvme_transport_id_parse_adrfam(adrfam, str);
+ CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_IPV6);
+
+ /* test adrfam value when use function "strcasecmp" to compare str and "IB",not case-sensitive */
+ str = "IB";
+ spdk_nvme_transport_id_parse_adrfam(adrfam, str);
+ CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_IB);
+
+ str = "ib";
+ spdk_nvme_transport_id_parse_adrfam(adrfam, str);
+ CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_IB);
+
+ /* test adrfam value when use function "strcasecmp" to compare str and "FC",not case-sensitive */
+ str = "FC";
+ spdk_nvme_transport_id_parse_adrfam(adrfam, str);
+ CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_FC);
+
+ str = "fc";
+ spdk_nvme_transport_id_parse_adrfam(adrfam, str);
+ CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_FC);
+
+}
+
+static void
+test_trid_trtype_str(void)
+{
+ const char *s;
+
+ s = spdk_nvme_transport_id_trtype_str(-5);
+ CU_ASSERT(s == NULL);
+
+ s = spdk_nvme_transport_id_trtype_str(SPDK_NVME_TRANSPORT_PCIE);
+ SPDK_CU_ASSERT_FATAL(s != NULL);
+ CU_ASSERT(strcmp(s, "PCIe") == 0);
+
+ s = spdk_nvme_transport_id_trtype_str(SPDK_NVME_TRANSPORT_RDMA);
+ SPDK_CU_ASSERT_FATAL(s != NULL);
+ CU_ASSERT(strcmp(s, "RDMA") == 0);
+}
+
+static void
+test_trid_adrfam_str(void)
+{
+ const char *s;
+
+ s = spdk_nvme_transport_id_adrfam_str(-5);
+ CU_ASSERT(s == NULL);
+
+ s = spdk_nvme_transport_id_adrfam_str(SPDK_NVMF_ADRFAM_IPV4);
+ SPDK_CU_ASSERT_FATAL(s != NULL);
+ CU_ASSERT(strcmp(s, "IPv4") == 0);
+
+ s = spdk_nvme_transport_id_adrfam_str(SPDK_NVMF_ADRFAM_IPV6);
+ SPDK_CU_ASSERT_FATAL(s != NULL);
+ CU_ASSERT(strcmp(s, "IPv6") == 0);
+
+ s = spdk_nvme_transport_id_adrfam_str(SPDK_NVMF_ADRFAM_IB);
+ SPDK_CU_ASSERT_FATAL(s != NULL);
+ CU_ASSERT(strcmp(s, "IB") == 0);
+
+ s = spdk_nvme_transport_id_adrfam_str(SPDK_NVMF_ADRFAM_FC);
+ SPDK_CU_ASSERT_FATAL(s != NULL);
+ CU_ASSERT(strcmp(s, "FC") == 0);
+}
+
/*
 * Register every test in a single "nvme" CUnit suite, run them in
 * verbose mode, and exit with the number of failed assertions
 * (0 == all passed).
 */
int main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("nvme", NULL, NULL);
	if (suite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	/* Any CU_add_test() failure aborts registration and reports the error. */
	if (
		CU_add_test(suite, "test_opc_data_transfer",
			    test_opc_data_transfer) == NULL ||
		CU_add_test(suite, "test_spdk_nvme_transport_id_parse_trtype",
			    test_spdk_nvme_transport_id_parse_trtype) == NULL ||
		CU_add_test(suite, "test_spdk_nvme_transport_id_parse_adrfam",
			    test_spdk_nvme_transport_id_parse_adrfam) == NULL ||
		CU_add_test(suite, "test_trid_parse_and_compare",
			    test_trid_parse_and_compare) == NULL ||
		CU_add_test(suite, "test_trid_trtype_str",
			    test_trid_trtype_str) == NULL ||
		CU_add_test(suite, "test_trid_adrfam_str",
			    test_trid_adrfam_str) == NULL ||
		CU_add_test(suite, "test_nvme_ctrlr_probe",
			    test_nvme_ctrlr_probe) == NULL ||
		CU_add_test(suite, "test_spdk_nvme_probe",
			    test_spdk_nvme_probe) == NULL ||
		CU_add_test(suite, "test_spdk_nvme_connect",
			    test_spdk_nvme_connect) == NULL ||
		CU_add_test(suite, "test_nvme_init_controllers",
			    test_nvme_init_controllers) == NULL ||
		CU_add_test(suite, "test_nvme_driver_init",
			    test_nvme_driver_init) == NULL ||
		CU_add_test(suite, "test_spdk_nvme_detach",
			    test_spdk_nvme_detach) == NULL ||
		CU_add_test(suite, "test_nvme_completion_poll_cb",
			    test_nvme_completion_poll_cb) == NULL ||
		CU_add_test(suite, "test_nvme_user_copy_cmd_complete",
			    test_nvme_user_copy_cmd_complete) == NULL ||
		CU_add_test(suite, "test_nvme_allocate_request_null",
			    test_nvme_allocate_request_null) == NULL ||
		CU_add_test(suite, "test_nvme_allocate_request",
			    test_nvme_allocate_request) == NULL ||
		CU_add_test(suite, "test_nvme_free_request",
			    test_nvme_free_request) == NULL ||
		CU_add_test(suite, "test_nvme_allocate_request_user_copy",
			    test_nvme_allocate_request_user_copy) == NULL ||
		CU_add_test(suite, "test_nvme_robust_mutex_init_shared",
			    test_nvme_robust_mutex_init_shared) == NULL
	) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/.gitignore
new file mode 100644
index 00000000..97a75bee
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/.gitignore
@@ -0,0 +1 @@
+nvme_ctrlr_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/Makefile
new file mode 100644
index 00000000..3ce33dc4
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)

# Single source file compiled into this unit test binary; the shared
# unittest makefile below supplies all build rules.
TEST_FILE = nvme_ctrlr_ut.c

include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/nvme_ctrlr_ut.c b/src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/nvme_ctrlr_ut.c
new file mode 100644
index 00000000..db7469ff
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/nvme_ctrlr_ut.c
@@ -0,0 +1,1795 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "spdk_internal/log.h"
+
+#include "common/lib/test_env.c"
+
/* Log flag instance required by the included nvme sources; disabled so the
 * code under test emits no debug output. */
struct spdk_trace_flag SPDK_LOG_NVME = {
	.name = "nvme",
	.enabled = false,
};

/* Compile the code under test directly into this translation unit so its
 * static functions are reachable from the tests. */
#include "nvme/nvme_ctrlr.c"
#include "nvme/nvme_quirks.c"

pid_t g_spdk_nvme_pid;

/* Fake global driver state with a plain (non-shared) mutex. */
struct nvme_driver _g_nvme_driver = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
};

struct nvme_driver *g_spdk_nvme_driver = &_g_nvme_driver;

/* Fake NVMe register file backing the mocked get/set_reg transport calls. */
struct spdk_nvme_registers g_ut_nvme_regs = {};

__thread int nvme_thread_ioq_index = -1;

uint32_t set_size = 1;

/* When set to 1 by a stub, the mocked wait_for_completion reports an error
 * completion; -1 means "untouched". */
int set_status_cpl = -1;

/* Stubs for ctrlr helpers not under test here; all report success. */
DEFINE_STUB(nvme_ctrlr_cmd_set_host_id, int,
	    (struct spdk_nvme_ctrlr *ctrlr, void *host_id, uint32_t host_id_size,
	     spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
DEFINE_STUB(nvme_ctrlr_identify_ns, int, (struct spdk_nvme_ns *ns), 0);
DEFINE_STUB(nvme_ctrlr_identify_id_desc, int, (struct spdk_nvme_ns *ns), 0);
DEFINE_STUB_V(nvme_ns_set_identify_data, (struct spdk_nvme_ns *ns));
+
/* Transport stub: construction always fails (tests construct ctrlrs directly). */
struct spdk_nvme_ctrlr *nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
		const struct spdk_nvme_ctrlr_opts *opts,
		void *devhandle)
{
	return NULL;
}

/* Transport stub: destruction just runs the generic finish step. */
int
nvme_transport_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{
	nvme_ctrlr_destruct_finish(ctrlr);

	return 0;
}

int
nvme_transport_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
{
	return 0;
}

/* Register accessors below read/write the fake register file g_ut_nvme_regs
 * at the given byte offset, bounds-checked against the register map size. */
int
nvme_transport_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value)
{
	SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 4);
	*(uint32_t *)((uintptr_t)&g_ut_nvme_regs + offset) = value;
	return 0;
}

int
nvme_transport_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value)
{
	SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 8);
	*(uint64_t *)((uintptr_t)&g_ut_nvme_regs + offset) = value;
	return 0;
}

int
nvme_transport_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value)
{
	SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 4);
	*value = *(uint32_t *)((uintptr_t)&g_ut_nvme_regs + offset);
	return 0;
}

int
nvme_transport_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value)
{
	SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 8);
	*value = *(uint64_t *)((uintptr_t)&g_ut_nvme_regs + offset);
	return 0;
}

/* Transport stub: effectively no transfer-size limit. */
uint32_t
nvme_transport_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
{
	return UINT32_MAX;
}

/* Transport stub: contiguous payloads only (a single SGE). */
uint16_t
nvme_transport_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr)
{
	return 1;
}

/* Transport stub: no controller memory buffer available. */
void *
nvme_transport_ctrlr_alloc_cmb_io_buffer(struct spdk_nvme_ctrlr *ctrlr, size_t size)
{
	return NULL;
}

int
nvme_transport_ctrlr_free_cmb_io_buffer(struct spdk_nvme_ctrlr *ctrlr, void *buf, size_t size)
{
	return 0;
}
+
/* Transport stub: allocate a bare qpair on the heap; freed by the delete stub. */
struct spdk_nvme_qpair *
nvme_transport_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
				     const struct spdk_nvme_io_qpair_opts *opts)
{
	struct spdk_nvme_qpair *qpair;

	qpair = calloc(1, sizeof(*qpair));
	SPDK_CU_ASSERT_FATAL(qpair != NULL);

	qpair->ctrlr = ctrlr;
	qpair->id = qid;
	qpair->qprio = opts->qprio;

	return qpair;
}

int
nvme_transport_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	free(qpair);
	return 0;
}

int
nvme_transport_ctrlr_reinit_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
{
	return 0;
}

int
nvme_transport_qpair_reset(struct spdk_nvme_qpair *qpair)
{
	return 0;
}

int
nvme_driver_init(void)
{
	return 0;
}

/* Stub: record only the fields the ctrlr code reads back. */
int nvme_qpair_init(struct spdk_nvme_qpair *qpair, uint16_t id,
		    struct spdk_nvme_ctrlr *ctrlr,
		    enum spdk_nvme_qprio qprio,
		    uint32_t num_requests)
{
	qpair->id = id;
	qpair->qprio = qprio;
	qpair->ctrlr = ctrlr;

	return 0;
}
+
+static void
+fake_cpl_success(spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ struct spdk_nvme_cpl cpl = {};
+
+ cpl.status.sc = SPDK_NVME_SC_SUCCESS;
+ cb_fn(cb_arg, &cpl);
+}
+
/* These tests never set/get features; fail loudly if the code under test
 * unexpectedly reaches either call. */
int
spdk_nvme_ctrlr_cmd_set_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				uint32_t cdw11, uint32_t cdw12, void *payload, uint32_t payload_size,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	CU_ASSERT(0);
	return -1;
}

int
spdk_nvme_ctrlr_cmd_get_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
				uint32_t cdw11, void *payload, uint32_t payload_size,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	CU_ASSERT(0);
	return -1;
}

/* Stub: log page reads always complete successfully and synchronously. */
int
spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page,
				 uint32_t nsid, void *payload, uint32_t payload_size,
				 uint64_t offset, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	fake_cpl_success(cb_fn, cb_arg);
	return 0;
}

/* Only async event requests are expected to be submitted directly here. */
int
nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
{
	CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_ASYNC_EVENT_REQUEST);

	/*
	 * For the purposes of this unit test, we don't need to bother emulating request submission.
	 */

	return 0;
}

int32_t
spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
{
	return 0;
}
+
/* Qpair enable/disable are no-ops for these tests. */
void
nvme_qpair_disable(struct spdk_nvme_qpair *qpair)
{
}

void
nvme_qpair_enable(struct spdk_nvme_qpair *qpair)
{
}

/* Standard poll callback: record the completion and mark the wait done. */
void
nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl)
{
	struct nvme_completion_poll_status *status = arg;

	status->cpl = *cpl;
	status->done = true;
}
+
+int
+spdk_nvme_wait_for_completion_robust_lock(
+ struct spdk_nvme_qpair *qpair,
+ struct nvme_completion_poll_status *status,
+ pthread_mutex_t *robust_mutex)
+{
+ status->done = true;
+ memset(&status->cpl, 0, sizeof(status->cpl));
+ status->cpl.status.sc = 0;
+ if (set_status_cpl == 1) {
+ status->cpl.status.sc = 1;
+ }
+ return spdk_nvme_cpl_is_error(&status->cpl) ? -EIO : 0;
+}
+
/* Non-locking variant: delegate to the robust-lock mock with no mutex. */
int
spdk_nvme_wait_for_completion(struct spdk_nvme_qpair *qpair,
			      struct nvme_completion_poll_status *status)
{
	return spdk_nvme_wait_for_completion_robust_lock(qpair, status, NULL);
}
+
+
/* Stub: async event configuration always succeeds synchronously. */
int
nvme_ctrlr_cmd_set_async_event_config(struct spdk_nvme_ctrlr *ctrlr,
				      union spdk_nvme_feat_async_event_configuration config, spdk_nvme_cmd_cb cb_fn,
				      void *cb_arg)
{
	fake_cpl_success(cb_fn, cb_arg);
	return 0;
}

/*
 * Stub identify: for the ACTIVE_NS_LIST CNS, fill the payload with the
 * namespace IDs greater than `nsid` (the list is paged starting after that
 * ID), up to ctrlr->num_ns or the list capacity, whichever comes first.
 * All other CNS values are ignored.  Always completes successfully.
 */
int
nvme_ctrlr_cmd_identify(struct spdk_nvme_ctrlr *ctrlr, uint8_t cns, uint16_t cntid, uint32_t nsid,
			void *payload, size_t payload_size,
			spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	if (cns == SPDK_NVME_IDENTIFY_ACTIVE_NS_LIST) {
		uint32_t count = 0;
		uint32_t i = 0;
		struct spdk_nvme_ns_list *ns_list = (struct spdk_nvme_ns_list *)payload;

		for (i = 1; i <= ctrlr->num_ns; i++) {
			if (i <= nsid) {
				continue;
			}

			ns_list->ns_list[count++] = i;
			if (count == SPDK_COUNTOF(ns_list->ns_list)) {
				break;
			}
		}

	}
	fake_cpl_success(cb_fn, cb_arg);
	return 0;
}

/* Stubs: queue-count negotiation always succeeds synchronously. */
int
nvme_ctrlr_cmd_set_num_queues(struct spdk_nvme_ctrlr *ctrlr,
			      uint32_t num_queues, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	fake_cpl_success(cb_fn, cb_arg);
	return 0;
}

int
nvme_ctrlr_cmd_get_num_queues(struct spdk_nvme_ctrlr *ctrlr,
			      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	fake_cpl_success(cb_fn, cb_arg);
	return 0;
}
+
/* Namespace management stubs: all succeed without invoking the callback. */
int
nvme_ctrlr_cmd_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

int
nvme_ctrlr_cmd_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
			 struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

int
nvme_ctrlr_cmd_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload,
			 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

int
nvme_ctrlr_cmd_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, spdk_nvme_cmd_cb cb_fn,
			 void *cb_arg)
{
	return 0;
}

int
nvme_ctrlr_cmd_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, struct spdk_nvme_format *format,
		      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return 0;
}

/*
 * Stub fw_commit: the tests only issue REPLACE_IMG commits.  A firmware
 * slot of 0 is treated as the failure case.  On success, arm set_status_cpl
 * so the subsequent mocked completion wait reports an error -- unless the
 * ctrlr is mid-reset, in which case the completion is left successful.
 */
int
nvme_ctrlr_cmd_fw_commit(struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_fw_commit *fw_commit,
			 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	CU_ASSERT(fw_commit->ca == SPDK_NVME_FW_COMMIT_REPLACE_IMG);
	if (fw_commit->fs == 0) {
		return -1;
	}
	set_status_cpl = 1;
	if (ctrlr->is_resetting == true) {
		set_status_cpl = 0;
	}
	return 0;
}

/* Stub download: size and payload must be consistent (both zero/NULL or
 * both non-zero), and the tests always download from offset 0. */
int
nvme_ctrlr_cmd_fw_image_download(struct spdk_nvme_ctrlr *ctrlr,
				 uint32_t size, uint32_t offset, void *payload,
				 spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	if ((size != 0 && payload == NULL) || (size == 0 && payload != NULL)) {
		return -1;
	}
	CU_ASSERT(offset == 0);
	return 0;
}

/* Namespace construct/destruct stubs: nothing to set up or tear down. */
void
nvme_ns_destruct(struct spdk_nvme_ns *ns)
{
}

int
nvme_ns_construct(struct spdk_nvme_ns *ns, uint32_t id,
		  struct spdk_nvme_ctrlr *ctrlr)
{
	return 0;
}
+
/*
 * Declare a zeroed ctrlr wired to a fake admin queue whose free list holds
 * one stack-allocated request, so the init state machine can allocate a
 * single admin request without a real transport.  (Declares variables, so
 * it cannot be wrapped in do { } while (0).)
 */
#define DECLARE_AND_CONSTRUCT_CTRLR()	\
	struct spdk_nvme_ctrlr	ctrlr = {};	\
	struct spdk_nvme_qpair	adminq = {};	\
	struct nvme_request	req;		\
	\
	STAILQ_INIT(&adminq.free_req);		\
	STAILQ_INSERT_HEAD(&adminq.free_req, &req, stailq);	\
	ctrlr.adminq = &adminq;
+
/*
 * Controller init starting from CC.EN=1, CSTS.RDY=0: the state machine must
 * wait for RDY=1, disable the controller (EN=0), wait for RDY=0, then run
 * the normal enable sequence through to READY.
 */
static void
test_nvme_ctrlr_init_en_1_rdy_0(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 1, CSTS.RDY = 0
	 */
	g_ut_nvme_regs.cc.bits.en = 1;
	g_ut_nvme_regs.csts.bits.rdy = 0;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1);

	/*
	 * Transition to CSTS.RDY = 1.
	 * init() should set CC.EN = 0.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Transition to CSTS.RDY = 0.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 0;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);

	/*
	 * Transition to CC.EN = 1
	 */
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	/* report shutdown complete so destruct's shutdown handling finishes */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}
+
/*
 * Controller init starting from CC.EN=1, CSTS.RDY=1: the state machine can
 * disable the controller immediately (EN=0), wait for RDY=0, then run the
 * normal enable sequence through to READY.
 */
static void
test_nvme_ctrlr_init_en_1_rdy_1(void)
{
	DECLARE_AND_CONSTRUCT_CTRLR();

	memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));

	/*
	 * Initial state: CC.EN = 1, CSTS.RDY = 1
	 * init() should set CC.EN = 0.
	 */
	g_ut_nvme_regs.cc.bits.en = 1;
	g_ut_nvme_regs.csts.bits.rdy = 1;

	SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
	ctrlr.cdata.nn = 1;
	ctrlr.page_size = 0x1000;
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);

	/*
	 * Transition to CSTS.RDY = 0.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 0;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);

	/*
	 * Transition to CC.EN = 1
	 */
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
	CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);

	/*
	 * Transition to CSTS.RDY = 1.
	 */
	g_ut_nvme_regs.csts.bits.rdy = 1;
	CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
	CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_ADMIN_QUEUE);

	/*
	 * Transition to READY.
	 */
	while (ctrlr.state != NVME_CTRLR_STATE_READY) {
		nvme_ctrlr_process_init(&ctrlr);
	}

	/* report shutdown complete so destruct's shutdown handling finishes */
	g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
	nvme_ctrlr_destruct(&ctrlr);
}
+
+/*
+ * Verify controller initialization starting from CC.EN = 0 / CSTS.RDY = 0
+ * when CAP.AMS advertises no optional arbitration mechanisms (round robin
+ * only).  Each case selects an arbitration mechanism in ctrlr.opts and
+ * steps nvme_ctrlr_process_init() through the init state machine; only
+ * round robin is expected to succeed.
+ */
+static void
+test_nvme_ctrlr_init_en_0_rdy_0_ams_rr(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));
+
+ /*
+ * Initial state: CC.EN = 0, CSTS.RDY = 0
+ * init() should set CC.EN = 1.
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Default round robin enabled
+ */
+ g_ut_nvme_regs.cap.bits.ams = 0x0;
+ ctrlr.cap = g_ut_nvme_regs.cap;
+
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ /*
+ * Case 1: default round robin arbitration mechanism selected
+ */
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
+ CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);
+
+ /*
+ * Complete and destroy the controller
+ */
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /*
+ * Reset to initial state
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Case 2: weighted round robin arbitration mechanism selected
+ * (not advertised in CAP.AMS here, so enabling the controller must fail)
+ */
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);
+
+ /*
+ * Complete and destroy the controller
+ */
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /*
+ * Reset to initial state
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Case 3: vendor specific arbitration mechanism selected
+ * (not advertised in CAP.AMS here, so enabling the controller must fail)
+ */
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);
+
+ /*
+ * Complete and destroy the controller
+ */
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /*
+ * Reset to initial state
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Case 4: invalid arbitration mechanism selected
+ * (out-of-range value, so enabling the controller must fail)
+ */
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);
+
+ /*
+ * Complete and destroy the controller
+ */
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /*
+ * Reset to initial state
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Case 5: reset to default round robin arbitration mechanism
+ */
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
+ CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);
+
+ /*
+ * Transition to CSTS.RDY = 1.
+ */
+ g_ut_nvme_regs.csts.bits.rdy = 1;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_ADMIN_QUEUE);
+
+ /*
+ * Transition to READY.
+ */
+ while (ctrlr.state != NVME_CTRLR_STATE_READY) {
+ nvme_ctrlr_process_init(&ctrlr);
+ }
+
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+}
+
+/*
+ * Verify controller initialization starting from CC.EN = 0 / CSTS.RDY = 0
+ * when CAP.AMS advertises weighted round robin (WRR).  RR and WRR requests
+ * are expected to succeed; vendor specific and out-of-range requests must
+ * fail to enable the controller.
+ */
+static void
+test_nvme_ctrlr_init_en_0_rdy_0_ams_wrr(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));
+
+ /*
+ * Initial state: CC.EN = 0, CSTS.RDY = 0
+ * init() should set CC.EN = 1.
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Weighted round robin enabled
+ */
+ g_ut_nvme_regs.cap.bits.ams = SPDK_NVME_CAP_AMS_WRR;
+ ctrlr.cap = g_ut_nvme_regs.cap;
+
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ /*
+ * Case 1: default round robin arbitration mechanism selected
+ */
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
+ CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);
+
+ /*
+ * Complete and destroy the controller
+ */
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /*
+ * Reset to initial state
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Case 2: weighted round robin arbitration mechanism selected
+ * (advertised in CAP.AMS, so init is expected to succeed)
+ */
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_WRR);
+ CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_WRR);
+
+ /*
+ * Complete and destroy the controller
+ */
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /*
+ * Reset to initial state
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Case 3: vendor specific arbitration mechanism selected
+ * (not advertised in CAP.AMS here, so enabling the controller must fail)
+ */
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);
+
+ /*
+ * Complete and destroy the controller
+ */
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /*
+ * Reset to initial state
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Case 4: invalid arbitration mechanism selected
+ * (out-of-range value, so enabling the controller must fail)
+ */
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);
+
+ /*
+ * Complete and destroy the controller
+ */
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /*
+ * Reset to initial state
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Case 5: reset to weighted round robin arbitration mechanism
+ */
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_WRR);
+ CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_WRR);
+
+ /*
+ * Transition to CSTS.RDY = 1.
+ */
+ g_ut_nvme_regs.csts.bits.rdy = 1;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_ADMIN_QUEUE);
+
+ /*
+ * Transition to READY.
+ */
+ while (ctrlr.state != NVME_CTRLR_STATE_READY) {
+ nvme_ctrlr_process_init(&ctrlr);
+ }
+
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+}
+/*
+ * Verify controller initialization starting from CC.EN = 0 / CSTS.RDY = 0
+ * when CAP.AMS advertises the vendor specific (VS) arbitration mechanism.
+ * RR and VS requests are expected to succeed; WRR and out-of-range
+ * requests must fail to enable the controller.
+ */
+static void
+test_nvme_ctrlr_init_en_0_rdy_0_ams_vs(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));
+
+ /*
+ * Initial state: CC.EN = 0, CSTS.RDY = 0
+ * init() should set CC.EN = 1.
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Vendor specific arbitration mechanism advertised in CAP.AMS
+ */
+ g_ut_nvme_regs.cap.bits.ams = SPDK_NVME_CAP_AMS_VS;
+ ctrlr.cap = g_ut_nvme_regs.cap;
+
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ /*
+ * Case 1: default round robin arbitration mechanism selected
+ */
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
+ CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);
+
+ /*
+ * Complete and destroy the controller
+ */
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /*
+ * Reset to initial state
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Case 2: weighted round robin arbitration mechanism selected
+ * (not advertised in CAP.AMS here, so enabling the controller must fail)
+ */
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);
+
+ /*
+ * Complete and destroy the controller
+ */
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /*
+ * Reset to initial state
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Case 3: vendor specific arbitration mechanism selected
+ * (advertised in CAP.AMS, so init is expected to succeed)
+ */
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_VS);
+ CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_VS);
+
+ /*
+ * Complete and destroy the controller
+ */
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /*
+ * Reset to initial state
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Case 4: invalid arbitration mechanism selected
+ * (out-of-range value, so enabling the controller must fail)
+ */
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);
+
+ /*
+ * Complete and destroy the controller
+ */
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /*
+ * Reset to initial state
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Case 5: reset to vendor specific arbitration mechanism
+ */
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_VS);
+ CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_VS);
+
+ /*
+ * Transition to CSTS.RDY = 1.
+ */
+ g_ut_nvme_regs.csts.bits.rdy = 1;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_ADMIN_QUEUE);
+
+ /*
+ * Transition to READY.
+ */
+ while (ctrlr.state != NVME_CTRLR_STATE_READY) {
+ nvme_ctrlr_process_init(&ctrlr);
+ }
+
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+}
+
+/*
+ * Basic init test from the CC.EN = 0 / CSTS.RDY = 0 state: the state
+ * machine should disable, enable (setting CC.EN = 1), wait for RDY = 1,
+ * and finally reach READY.
+ */
+static void
+test_nvme_ctrlr_init_en_0_rdy_0(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));
+
+ /*
+ * Initial state: CC.EN = 0, CSTS.RDY = 0
+ * init() should set CC.EN = 1.
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+
+ /*
+ * Transition to CSTS.RDY = 1.
+ */
+ g_ut_nvme_regs.csts.bits.rdy = 1;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_ADMIN_QUEUE);
+
+ /*
+ * Transition to READY.
+ */
+ while (ctrlr.state != NVME_CTRLR_STATE_READY) {
+ nvme_ctrlr_process_init(&ctrlr);
+ }
+
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+}
+
+/*
+ * Init test from the unusual CC.EN = 0 / CSTS.RDY = 1 state: the state
+ * machine must first wait for RDY to drop to 0 before enabling the
+ * controller, then proceed through the normal enable path to READY.
+ */
+static void
+test_nvme_ctrlr_init_en_0_rdy_1(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));
+
+ /*
+ * Initial state: CC.EN = 0, CSTS.RDY = 1
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 1;
+
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+
+ /*
+ * Transition to CSTS.RDY = 0.
+ */
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+
+ /*
+ * Transition to CC.EN = 1
+ */
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+
+ /*
+ * Transition to CSTS.RDY = 1.
+ */
+ g_ut_nvme_regs.csts.bits.rdy = 1;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_ADMIN_QUEUE);
+
+ /*
+ * Transition to READY.
+ */
+ while (ctrlr.state != NVME_CTRLR_STATE_READY) {
+ nvme_ctrlr_process_init(&ctrlr);
+ }
+
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+}
+
+/*
+ * Test helper: construct a controller and populate its free I/O queue ID
+ * bitmap so that qids 1..num_io_queues are available for allocation
+ * (qid 0 is reserved for the admin queue and is explicitly cleared).
+ */
+static void
+setup_qpairs(struct spdk_nvme_ctrlr *ctrlr, uint32_t num_io_queues)
+{
+ uint32_t i;
+
+ CU_ASSERT(pthread_mutex_init(&ctrlr->ctrlr_lock, NULL) == 0);
+
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(ctrlr) == 0);
+
+ ctrlr->page_size = 0x1000;
+ ctrlr->opts.num_io_queues = num_io_queues;
+ /* bit array sized num_io_queues + 1 to leave room for the admin qid 0 */
+ ctrlr->free_io_qids = spdk_bit_array_create(num_io_queues + 1);
+ SPDK_CU_ASSERT_FATAL(ctrlr->free_io_qids != NULL);
+
+ spdk_bit_array_clear(ctrlr->free_io_qids, 0);
+ for (i = 1; i <= num_io_queues; i++) {
+ spdk_bit_array_set(ctrlr->free_io_qids, i);
+ }
+}
+
+/* Test helper: tear down a controller set up by setup_qpairs(). */
+static void
+cleanup_qpairs(struct spdk_nvme_ctrlr *ctrlr)
+{
+ nvme_ctrlr_destruct(ctrlr);
+}
+
+/*
+ * I/O qpair allocation under the default round robin arbitration
+ * mechanism: with one I/O queue available, allocation succeeds once,
+ * fails when exhausted, succeeds again after free, and rejects any
+ * non-zero qprio (RR supports only priority 0).
+ */
+static void
+test_alloc_io_qpair_rr_1(void)
+{
+ struct spdk_nvme_io_qpair_opts opts;
+ struct spdk_nvme_ctrlr ctrlr = {};
+ struct spdk_nvme_qpair *q0;
+
+ setup_qpairs(&ctrlr, 1);
+
+ /*
+ * Fake to simulate the controller with default round robin
+ * arbitration mechanism.
+ */
+ g_ut_nvme_regs.cc.bits.ams = SPDK_NVME_CC_AMS_RR;
+
+ spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));
+
+ q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, NULL, 0);
+ SPDK_CU_ASSERT_FATAL(q0 != NULL);
+ SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
+ /* Only 1 I/O qpair was allocated, so this should fail */
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, NULL, 0) == NULL);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
+
+ /*
+ * Now that the qpair has been returned to the free list,
+ * we should be able to allocate it again.
+ */
+ q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, NULL, 0);
+ SPDK_CU_ASSERT_FATAL(q0 != NULL);
+ SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
+
+ /* Only 0 qprio is acceptable for default round robin arbitration mechanism */
+ opts.qprio = 1;
+ q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q0 == NULL);
+
+ opts.qprio = 2;
+ q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q0 == NULL);
+
+ opts.qprio = 3;
+ q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q0 == NULL);
+
+ /* Only 0 ~ 3 qprio is acceptable */
+ opts.qprio = 4;
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts)) == NULL);
+
+ cleanup_qpairs(&ctrlr);
+}
+
+/*
+ * I/O qpair allocation under weighted round robin: qprio values 0-3 are
+ * all accepted and preserved on the allocated qpair; qpairs can be freed
+ * in either allocation or reverse order; qprio > 3 is rejected.
+ */
+static void
+test_alloc_io_qpair_wrr_1(void)
+{
+ struct spdk_nvme_io_qpair_opts opts;
+ struct spdk_nvme_ctrlr ctrlr = {};
+ struct spdk_nvme_qpair *q0, *q1;
+
+ setup_qpairs(&ctrlr, 2);
+
+ /*
+ * Fake to simulate the controller with weighted round robin
+ * arbitration mechanism.
+ */
+ g_ut_nvme_regs.cc.bits.ams = SPDK_NVME_CC_AMS_WRR;
+
+ spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));
+
+ /*
+ * Allocate 2 qpairs and free them
+ */
+ opts.qprio = 0;
+ q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q0 != NULL);
+ SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
+
+ opts.qprio = 1;
+ q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q1 != NULL);
+ SPDK_CU_ASSERT_FATAL(q1->qprio == 1);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
+
+ /*
+ * Allocate 2 qpairs and free them in the reverse order
+ */
+ opts.qprio = 2;
+ q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q0 != NULL);
+ SPDK_CU_ASSERT_FATAL(q0->qprio == 2);
+
+ opts.qprio = 3;
+ q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q1 != NULL);
+ SPDK_CU_ASSERT_FATAL(q1->qprio == 3);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
+
+ /* Only 0 ~ 3 qprio is acceptable */
+ opts.qprio = 4;
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts)) == NULL);
+
+ cleanup_qpairs(&ctrlr);
+}
+
+/*
+ * I/O qpair allocation under weighted round robin with 4 I/O queues:
+ * exhausting the pool fails further allocation; after freeing, the pool
+ * can be re-allocated, including multiple qpairs sharing the same qprio.
+ */
+static void
+test_alloc_io_qpair_wrr_2(void)
+{
+ struct spdk_nvme_io_qpair_opts opts;
+ struct spdk_nvme_ctrlr ctrlr = {};
+ struct spdk_nvme_qpair *q0, *q1, *q2, *q3;
+
+ setup_qpairs(&ctrlr, 4);
+
+ /*
+ * Fake to simulate the controller with weighted round robin
+ * arbitration mechanism.
+ */
+ g_ut_nvme_regs.cc.bits.ams = SPDK_NVME_CC_AMS_WRR;
+
+ spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));
+
+ opts.qprio = 0;
+ q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q0 != NULL);
+ SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
+
+ opts.qprio = 1;
+ q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q1 != NULL);
+ SPDK_CU_ASSERT_FATAL(q1->qprio == 1);
+
+ opts.qprio = 2;
+ q2 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q2 != NULL);
+ SPDK_CU_ASSERT_FATAL(q2->qprio == 2);
+
+ opts.qprio = 3;
+ q3 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q3 != NULL);
+ SPDK_CU_ASSERT_FATAL(q3->qprio == 3);
+
+ /* Only 4 I/O qpairs were allocated, so this should fail */
+ opts.qprio = 0;
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts)) == NULL);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q3) == 0);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q2) == 0);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
+
+ /*
+ * Now that the qpair has been returned to the free list,
+ * we should be able to allocate it again.
+ *
+ * Allocate 4 I/O qpairs and half of them with same qprio.
+ */
+ opts.qprio = 1;
+ q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q0 != NULL);
+ SPDK_CU_ASSERT_FATAL(q0->qprio == 1);
+
+ opts.qprio = 1;
+ q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q1 != NULL);
+ SPDK_CU_ASSERT_FATAL(q1->qprio == 1);
+
+ opts.qprio = 3;
+ q2 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q2 != NULL);
+ SPDK_CU_ASSERT_FATAL(q2->qprio == 3);
+
+ opts.qprio = 3;
+ q3 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q3 != NULL);
+ SPDK_CU_ASSERT_FATAL(q3->qprio == 3);
+
+ /*
+ * Free all I/O qpairs in reverse order
+ */
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q2) == 0);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q3) == 0);
+
+ cleanup_qpairs(&ctrlr);
+}
+
+/*
+ * nvme_ctrlr_fail() with hot_remove = false must mark the controller
+ * as failed.
+ */
+static void
+test_nvme_ctrlr_fail(void)
+{
+ struct spdk_nvme_ctrlr ctrlr = {};
+
+ ctrlr.opts.num_io_queues = 0;
+ nvme_ctrlr_fail(&ctrlr, false);
+
+ CU_ASSERT(ctrlr.is_failed == true);
+}
+
+/*
+ * Verify that the Intel-specific supported log page list is built
+ * according to the PCI ID derived quirks and the log page directory
+ * payload: non-Intel devices get nothing, a generic Intel device honors
+ * the payload lengths, and a quirked Intel device (0953/3702) reports
+ * read command latency support regardless of the payload.
+ */
+static void
+test_nvme_ctrlr_construct_intel_support_log_page_list(void)
+{
+ bool res;
+ struct spdk_nvme_ctrlr ctrlr = {};
+ struct spdk_nvme_intel_log_page_directory payload = {};
+ struct spdk_pci_id pci_id = {};
+
+ /* Get quirks for a device with all 0 vendor/device id */
+ ctrlr.quirks = nvme_get_quirks(&pci_id);
+ CU_ASSERT(ctrlr.quirks == 0);
+
+ nvme_ctrlr_construct_intel_support_log_page_list(&ctrlr, &payload);
+ res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE);
+ CU_ASSERT(res == false);
+
+ /* Set the vendor to Intel, but provide no device id */
+ ctrlr.cdata.vid = pci_id.vendor_id = SPDK_PCI_VID_INTEL;
+ payload.temperature_statistics_log_len = 1;
+ ctrlr.quirks = nvme_get_quirks(&pci_id);
+ memset(ctrlr.log_page_supported, 0, sizeof(ctrlr.log_page_supported));
+
+ nvme_ctrlr_construct_intel_support_log_page_list(&ctrlr, &payload);
+ res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY);
+ CU_ASSERT(res == true);
+ res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE);
+ CU_ASSERT(res == true);
+ res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY);
+ CU_ASSERT(res == false);
+ res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_SMART);
+ CU_ASSERT(res == false);
+
+ /* set valid vendor id, device id and sub device id */
+ ctrlr.cdata.vid = SPDK_PCI_VID_INTEL;
+ payload.temperature_statistics_log_len = 0;
+ pci_id.vendor_id = SPDK_PCI_VID_INTEL;
+ pci_id.device_id = 0x0953;
+ pci_id.subvendor_id = SPDK_PCI_VID_INTEL;
+ pci_id.subdevice_id = 0x3702;
+ ctrlr.quirks = nvme_get_quirks(&pci_id);
+ memset(ctrlr.log_page_supported, 0, sizeof(ctrlr.log_page_supported));
+
+ nvme_ctrlr_construct_intel_support_log_page_list(&ctrlr, &payload);
+ res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY);
+ CU_ASSERT(res == true);
+ res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE);
+ CU_ASSERT(res == false);
+ res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY);
+ CU_ASSERT(res == true);
+ res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_SMART);
+ CU_ASSERT(res == false);
+}
+
+/*
+ * Verify that the supported-feature bitmap includes standard NVMe
+ * features for any vendor, and Intel vendor-specific features only
+ * when the controller reports the Intel vendor ID.
+ */
+static void
+test_nvme_ctrlr_set_supported_features(void)
+{
+ bool res;
+ struct spdk_nvme_ctrlr ctrlr = {};
+
+ /* set an invalid vendor id */
+ ctrlr.cdata.vid = 0xFFFF;
+ nvme_ctrlr_set_supported_features(&ctrlr);
+ res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_FEAT_ARBITRATION);
+ CU_ASSERT(res == true);
+ res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_INTEL_FEAT_MAX_LBA);
+ CU_ASSERT(res == false);
+
+ ctrlr.cdata.vid = SPDK_PCI_VID_INTEL;
+ nvme_ctrlr_set_supported_features(&ctrlr);
+ res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_FEAT_ARBITRATION);
+ CU_ASSERT(res == true);
+ res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_INTEL_FEAT_MAX_LBA);
+ CU_ASSERT(res == true);
+}
+
+/*
+ * Verify spdk_nvme_ctrlr_get_default_ctrlr_opts() honors opts_size:
+ * with a truncated size only the leading fields are initialized, while
+ * the full size initializes every field including the hostnqn derived
+ * from the driver's default extended host ID.
+ */
+static void
+test_ctrlr_get_default_ctrlr_opts(void)
+{
+ struct spdk_nvme_ctrlr_opts opts = {};
+
+ CU_ASSERT(spdk_uuid_parse(&g_spdk_nvme_driver->default_extended_host_id,
+        "e53e9258-c93b-48b5-be1a-f025af6d232a") == 0);
+
+ memset(&opts, 0, sizeof(opts));
+
+ /* set a smaller opts_size */
+ CU_ASSERT(sizeof(opts) > 8);
+ spdk_nvme_ctrlr_get_default_ctrlr_opts(&opts, 8);
+ CU_ASSERT_EQUAL(opts.num_io_queues, DEFAULT_MAX_IO_QUEUES);
+ CU_ASSERT_TRUE(opts.use_cmb_sqs);
+ /* check below fields are not initialized by default value */
+ CU_ASSERT_EQUAL(opts.arb_mechanism, 0);
+ CU_ASSERT_EQUAL(opts.keep_alive_timeout_ms, 0);
+ CU_ASSERT_EQUAL(opts.io_queue_size, 0);
+ CU_ASSERT_EQUAL(opts.io_queue_requests, 0);
+ for (int i = 0; i < 8; i++) {
+ CU_ASSERT(opts.host_id[i] == 0);
+ }
+ for (int i = 0; i < 16; i++) {
+ CU_ASSERT(opts.extended_host_id[i] == 0);
+ }
+ CU_ASSERT(strlen(opts.hostnqn) == 0);
+ CU_ASSERT(strlen(opts.src_addr) == 0);
+ CU_ASSERT(strlen(opts.src_svcid) == 0);
+
+ /* set a consistent opts_size */
+ spdk_nvme_ctrlr_get_default_ctrlr_opts(&opts, sizeof(opts));
+ CU_ASSERT_EQUAL(opts.num_io_queues, DEFAULT_MAX_IO_QUEUES);
+ CU_ASSERT_TRUE(opts.use_cmb_sqs);
+ CU_ASSERT_EQUAL(opts.arb_mechanism, SPDK_NVME_CC_AMS_RR);
+ CU_ASSERT_EQUAL(opts.keep_alive_timeout_ms, 10 * 1000);
+ CU_ASSERT_EQUAL(opts.io_queue_size, DEFAULT_IO_QUEUE_SIZE);
+ CU_ASSERT_EQUAL(opts.io_queue_requests, DEFAULT_IO_QUEUE_REQUESTS);
+ for (int i = 0; i < 8; i++) {
+ CU_ASSERT(opts.host_id[i] == 0);
+ }
+ CU_ASSERT_STRING_EQUAL(opts.hostnqn,
+        "2014-08.org.nvmexpress:uuid:e53e9258-c93b-48b5-be1a-f025af6d232a");
+ CU_ASSERT(memcmp(opts.extended_host_id, &g_spdk_nvme_driver->default_extended_host_id,
+   sizeof(opts.extended_host_id)) == 0);
+ CU_ASSERT(strlen(opts.src_addr) == 0);
+ CU_ASSERT(strlen(opts.src_svcid) == 0);
+}
+
+/*
+ * Verify spdk_nvme_ctrlr_get_default_io_qpair_opts() honors opts_size:
+ * a truncated size initializes only the leading fields (qprio,
+ * io_queue_size), while the full size also fills io_queue_requests
+ * from the controller options.
+ */
+static void
+test_ctrlr_get_default_io_qpair_opts(void)
+{
+ struct spdk_nvme_ctrlr ctrlr = {};
+ struct spdk_nvme_io_qpair_opts opts = {};
+
+ memset(&opts, 0, sizeof(opts));
+
+ /* set a smaller opts_size */
+ ctrlr.opts.io_queue_size = DEFAULT_IO_QUEUE_SIZE;
+ CU_ASSERT(sizeof(opts) > 8);
+ spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, 8);
+ CU_ASSERT_EQUAL(opts.qprio, SPDK_NVME_QPRIO_URGENT);
+ CU_ASSERT_EQUAL(opts.io_queue_size, DEFAULT_IO_QUEUE_SIZE);
+ /* check below field is not initialized by default value */
+ CU_ASSERT_EQUAL(opts.io_queue_requests, 0);
+
+ /* set a consistent opts_size */
+ ctrlr.opts.io_queue_size = DEFAULT_IO_QUEUE_SIZE;
+ ctrlr.opts.io_queue_requests = DEFAULT_IO_QUEUE_REQUESTS;
+ spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));
+ CU_ASSERT_EQUAL(opts.qprio, SPDK_NVME_QPRIO_URGENT);
+ CU_ASSERT_EQUAL(opts.io_queue_size, DEFAULT_IO_QUEUE_SIZE);
+ CU_ASSERT_EQUAL(opts.io_queue_requests, DEFAULT_IO_QUEUE_REQUESTS);
+}
+
+#if 0 /* TODO: move to PCIe-specific unit test */
+/*
+ * Disabled: exercises controller memory buffer (CMB) sub-allocation
+ * (alignment and capacity checks) against fields that belong to the
+ * PCIe transport implementation.
+ */
+static void
+test_nvme_ctrlr_alloc_cmb(void)
+{
+ int rc;
+ uint64_t offset;
+ struct spdk_nvme_ctrlr ctrlr = {};
+
+ ctrlr.cmb_size = 0x1000000;
+ ctrlr.cmb_current_offset = 0x100;
+ rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x200, 0x1000, &offset);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(offset == 0x1000);
+ CU_ASSERT(ctrlr.cmb_current_offset == 0x1200);
+
+ rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x800, 0x1000, &offset);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(offset == 0x2000);
+ CU_ASSERT(ctrlr.cmb_current_offset == 0x2800);
+
+ rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x800000, 0x100000, &offset);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(offset == 0x100000);
+ CU_ASSERT(ctrlr.cmb_current_offset == 0x900000);
+
+ rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x8000000, 0x1000, &offset);
+ CU_ASSERT(rc == -1);
+}
+#endif
+
+/*
+ * Drive spdk_nvme_ctrlr_update_firmware() through its failure paths
+ * (misaligned size, size vs. min_page_size mismatch, NULL payload,
+ * failed completion status) and finally through a successful image
+ * download + commit. The stubbed completion path is steered via the
+ * set_size / set_status_cpl globals declared earlier in this file.
+ */
+static void
+test_spdk_nvme_ctrlr_update_firmware(void)
+{
+	struct spdk_nvme_ctrlr ctrlr = {};
+	void *payload = NULL;
+	int point_payload = 1;
+	int slot = 0;
+	int ret = 0;
+	struct spdk_nvme_status status;
+	enum spdk_nvme_fw_commit_action commit_action = SPDK_NVME_FW_COMMIT_REPLACE_IMG;
+
+	/* Set invalid size check function return value */
+	set_size = 5;
+	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
+	CU_ASSERT(ret == -1);
+
+	/* When payload is NULL but set_size < min_page_size */
+	set_size = 4;
+	ctrlr.min_page_size = 5;
+	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
+	CU_ASSERT(ret == -1);
+
+	/* When payload not NULL but min_page_size is 0 */
+	set_size = 4;
+	ctrlr.min_page_size = 0;
+	payload = &point_payload;
+	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
+	CU_ASSERT(ret == -1);
+
+	/* Check firmware image download when payload not NULL and min_page_size not 0 , status.cpl value is 1 */
+	set_status_cpl = 1;
+	set_size = 4;
+	ctrlr.min_page_size = 5;
+	payload = &point_payload;
+	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
+	CU_ASSERT(ret == -ENXIO);
+
+	/* Check firmware image download and set status.cpl value is 0 */
+	set_status_cpl = 0;
+	set_size = 4;
+	ctrlr.min_page_size = 5;
+	payload = &point_payload;
+	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
+	CU_ASSERT(ret == -1);
+
+	/* Check firmware commit */
+	ctrlr.is_resetting = false;
+	set_status_cpl = 0;
+	slot = 1;
+	set_size = 4;
+	ctrlr.min_page_size = 5;
+	payload = &point_payload;
+	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
+	CU_ASSERT(ret == -ENXIO);
+
+	/* Set size check firmware download and firmware commit */
+	ctrlr.is_resetting = true;
+	set_status_cpl = 0;
+	slot = 1;
+	set_size = 4;
+	ctrlr.min_page_size = 5;
+	payload = &point_payload;
+	ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
+	CU_ASSERT(ret == 0);
+
+	/* reset shared stub state so later tests see a clean slate */
+	set_status_cpl = 0;
+}
+
+/* Stub: complete the doorbell-buffer-config admin command successfully. */
+int
+nvme_ctrlr_cmd_doorbell_buffer_config(struct spdk_nvme_ctrlr *ctrlr, uint64_t prp1, uint64_t prp2,
+				      spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+	fake_cpl_success(cb_fn, cb_arg);
+	return 0;
+}
+
+/*
+ * nvme_ctrlr_set_doorbell_buffer_config() should succeed for a PCIe
+ * controller reporting OACS doorbell-buffer support; the allocation
+ * mocks are cleared first so the real allocators run.
+ */
+static void
+test_spdk_nvme_ctrlr_doorbell_buffer_config(void)
+{
+	struct spdk_nvme_ctrlr ctrlr = {};
+	int ret = -1;
+
+	ctrlr.cdata.oacs.doorbell_buffer_config = 1;
+	ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
+	ctrlr.page_size = 0x1000;
+	MOCK_CLEAR(spdk_malloc)
+	MOCK_CLEAR(spdk_zmalloc)
+	MOCK_CLEAR(spdk_dma_malloc)
+	MOCK_CLEAR(spdk_dma_zmalloc)
+	ret = nvme_ctrlr_set_doorbell_buffer_config(&ctrlr);
+	CU_ASSERT(ret == 0);
+	nvme_ctrlr_free_doorbell_buffer(&ctrlr);
+}
+
+/*
+ * Cover nvme_ctrlr_identify_active_ns() and the active-namespace
+ * query/iteration helpers (is_active_ns, get_first_active_ns,
+ * get_next_active_ns) for controllers reporting NVMe 1.0 through 1.2
+ * (minor = 0..2), including sparse and fully-populated active lists.
+ */
+static void
+test_nvme_ctrlr_test_active_ns(void)
+{
+	uint32_t nsid, minor;
+	size_t ns_id_count;
+	struct spdk_nvme_ctrlr ctrlr = {};
+
+	ctrlr.page_size = 0x1000;
+
+	for (minor = 0; minor <= 2; minor++) {
+		ctrlr.cdata.ver.bits.mjr = 1;
+		ctrlr.cdata.ver.bits.mnr = minor;
+		ctrlr.cdata.ver.bits.ter = 0;
+		ctrlr.num_ns = 1531;
+		nvme_ctrlr_identify_active_ns(&ctrlr);
+
+		/* every nsid up to num_ns is active right after identify */
+		for (nsid = 1; nsid <= ctrlr.num_ns; nsid++) {
+			CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, nsid) == true);
+		}
+		/* nsids beyond the identified range report inactive */
+		ctrlr.num_ns = 1559;
+		for (; nsid <= ctrlr.num_ns; nsid++) {
+			CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, nsid) == false);
+		}
+		/* empty active list -> no first active namespace */
+		ctrlr.num_ns = 1531;
+		for (nsid = 0; nsid < ctrlr.num_ns; nsid++) {
+			ctrlr.active_ns_list[nsid] = 0;
+		}
+		CU_ASSERT(spdk_nvme_ctrlr_get_first_active_ns(&ctrlr) == 0);
+
+		ctrlr.active_ns_list[0] = 1;
+		CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1) == true);
+		CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 2) == false);
+		nsid = spdk_nvme_ctrlr_get_first_active_ns(&ctrlr);
+		CU_ASSERT(nsid == 1);
+
+		/* sparse list {1, 3}: iteration skips the gap, then ends with 0 */
+		ctrlr.active_ns_list[1] = 3;
+		CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1) == true);
+		CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 2) == false);
+		CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 3) == true);
+		nsid = spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, nsid);
+		CU_ASSERT(nsid == 3);
+		nsid = spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, nsid);
+		CU_ASSERT(nsid == 0);
+
+		/* NOTE(review): length is num_ns BYTES, not num_ns * sizeof(uint32_t);
+		 * harmless because the loop below rewrites every entry — confirm intent. */
+		memset(ctrlr.active_ns_list, 0, ctrlr.num_ns);
+		for (nsid = 0; nsid < ctrlr.num_ns; nsid++) {
+			ctrlr.active_ns_list[nsid] = nsid + 1;
+		}
+
+		/* full list: iterator must visit exactly num_ns namespaces */
+		ns_id_count = 0;
+		for (nsid = spdk_nvme_ctrlr_get_first_active_ns(&ctrlr);
+		     nsid != 0; nsid = spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, nsid)) {
+			CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, nsid) == true);
+			ns_id_count++;
+		}
+		CU_ASSERT(ns_id_count == ctrlr.num_ns);
+
+		nvme_ctrlr_destruct(&ctrlr);
+	}
+}
+
+/* Register every nvme_ctrlr test with CUnit and return the failure count. */
+int main(int argc, char **argv)
+{
+	CU_pSuite suite = NULL;
+	unsigned int num_failures;
+
+	if (CU_initialize_registry() != CUE_SUCCESS) {
+		return CU_get_error();
+	}
+
+	suite = CU_add_suite("nvme_ctrlr", NULL, NULL);
+	if (suite == NULL) {
+		CU_cleanup_registry();
+		return CU_get_error();
+	}
+
+	if (
+		CU_add_test(suite, "test nvme_ctrlr init CC.EN = 1 CSTS.RDY = 0",
+			    test_nvme_ctrlr_init_en_1_rdy_0) == NULL
+		|| CU_add_test(suite, "test nvme_ctrlr init CC.EN = 1 CSTS.RDY = 1",
+			       test_nvme_ctrlr_init_en_1_rdy_1) == NULL
+		|| CU_add_test(suite, "test nvme_ctrlr init CC.EN = 0 CSTS.RDY = 0",
+			       test_nvme_ctrlr_init_en_0_rdy_0) == NULL
+		|| CU_add_test(suite, "test nvme_ctrlr init CC.EN = 0 CSTS.RDY = 1",
+			       test_nvme_ctrlr_init_en_0_rdy_1) == NULL
+		|| CU_add_test(suite, "test nvme_ctrlr init CC.EN = 0 CSTS.RDY = 0 AMS = RR",
+			       test_nvme_ctrlr_init_en_0_rdy_0_ams_rr) == NULL
+		|| CU_add_test(suite, "test nvme_ctrlr init CC.EN = 0 CSTS.RDY = 0 AMS = WRR",
+			       test_nvme_ctrlr_init_en_0_rdy_0_ams_wrr) == NULL
+		|| CU_add_test(suite, "test nvme_ctrlr init CC.EN = 0 CSTS.RDY = 0 AMS = VS",
+			       test_nvme_ctrlr_init_en_0_rdy_0_ams_vs) == NULL
+		|| CU_add_test(suite, "alloc_io_qpair_rr 1", test_alloc_io_qpair_rr_1) == NULL
+		|| CU_add_test(suite, "get_default_ctrlr_opts", test_ctrlr_get_default_ctrlr_opts) == NULL
+		|| CU_add_test(suite, "get_default_io_qpair_opts", test_ctrlr_get_default_io_qpair_opts) == NULL
+		|| CU_add_test(suite, "alloc_io_qpair_wrr 1", test_alloc_io_qpair_wrr_1) == NULL
+		|| CU_add_test(suite, "alloc_io_qpair_wrr 2", test_alloc_io_qpair_wrr_2) == NULL
+		|| CU_add_test(suite, "test nvme ctrlr function update_firmware",
+			       test_spdk_nvme_ctrlr_update_firmware) == NULL
+		|| CU_add_test(suite, "test nvme_ctrlr function nvme_ctrlr_fail", test_nvme_ctrlr_fail) == NULL
+		|| CU_add_test(suite, "test nvme ctrlr function nvme_ctrlr_construct_intel_support_log_page_list",
+			       test_nvme_ctrlr_construct_intel_support_log_page_list) == NULL
+		|| CU_add_test(suite, "test nvme ctrlr function nvme_ctrlr_set_supported_features",
+			       test_nvme_ctrlr_set_supported_features) == NULL
+		|| CU_add_test(suite, "test nvme ctrlr function nvme_ctrlr_set_doorbell_buffer_config",
+			       test_spdk_nvme_ctrlr_doorbell_buffer_config) == NULL
+#if 0 /* TODO: move to PCIe-specific unit test */
+		|| CU_add_test(suite, "test nvme ctrlr function nvme_ctrlr_alloc_cmb",
+			       test_nvme_ctrlr_alloc_cmb) == NULL
+#endif
+		|| CU_add_test(suite, "test nvme ctrlr function test_nvme_ctrlr_test_active_ns",
+			       test_nvme_ctrlr_test_active_ns) == NULL
+	) {
+		CU_cleanup_registry();
+		return CU_get_error();
+	}
+
+	CU_basic_set_mode(CU_BRM_VERBOSE);
+	CU_basic_run_tests();
+	num_failures = CU_get_number_of_failures();
+	CU_cleanup_registry();
+	return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/.gitignore
new file mode 100644
index 00000000..1568b476
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/.gitignore
@@ -0,0 +1 @@
+nvme_ctrlr_cmd_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/Makefile
new file mode 100644
index 00000000..5c647dd3
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+# Unit-test build glue: locate the SPDK tree root, name the single test
+# source, and reuse the shared unit-test make rules.
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_ctrlr_cmd_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/nvme_ctrlr_cmd_ut.c b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/nvme_ctrlr_cmd_ut.c
new file mode 100644
index 00000000..8cbc4476
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/nvme_ctrlr_cmd_ut.c
@@ -0,0 +1,645 @@
+
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "nvme/nvme_ctrlr_cmd.c"
+
+#define CTRLR_CDATA_ELPE 5
+
+pid_t g_spdk_nvme_pid;
+
+struct nvme_request g_req;
+
+/*
+ * Input values fed into the command-building APIs under test; the
+ * verify_* callbacks below check that the resulting spdk_nvme_cmd
+ * fields match them.
+ */
+uint32_t error_num_entries;
+uint32_t health_log_nsid = 1;
+uint8_t feature = 1;
+uint32_t feature_cdw11 = 1;
+uint32_t feature_cdw12 = 1;
+uint8_t get_feature = 1;
+uint32_t get_feature_cdw11 = 1;
+uint32_t fw_img_size = 1024;
+uint32_t fw_img_offset = 0;
+uint16_t abort_cid = 1;
+uint16_t abort_sqid = 1;
+uint32_t namespace_management_nsid = 1;
+uint32_t format_nvme_nsid = 1;
+
+uint32_t expected_feature_ns = 2;
+uint32_t expected_feature_cdw10 = SPDK_NVME_FEAT_LBA_RANGE_TYPE;
+uint32_t expected_feature_cdw11 = 1;
+uint32_t expected_feature_cdw12 = 1;
+
+/* Per-test hook: the submit stubs route each built request here for
+ * validation instead of sending it to hardware. */
+typedef void (*verify_request_fn_t)(struct nvme_request *req);
+verify_request_fn_t verify_fn;
+
+/*
+ * Request-verification callbacks. Each checks the opcode plus the
+ * cdw10/cdw11/nsid encoding built for one admin command. For GET LOG
+ * PAGE, cdw10 packs NUMD (dword count minus one) into the upper 16
+ * bits and the log page ID into the low byte.
+ */
+static void verify_firmware_log_page(struct nvme_request *req)
+{
+	uint32_t temp_cdw10;
+
+	CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE);
+	CU_ASSERT(req->cmd.nsid == SPDK_NVME_GLOBAL_NS_TAG);
+
+	temp_cdw10 = ((sizeof(struct spdk_nvme_firmware_page) / sizeof(uint32_t) - 1) << 16) |
+		     SPDK_NVME_LOG_FIRMWARE_SLOT;
+	CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
+}
+
+static void verify_health_log_page(struct nvme_request *req)
+{
+	uint32_t temp_cdw10;
+
+	CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE);
+	CU_ASSERT(req->cmd.nsid == health_log_nsid);
+
+	temp_cdw10 = ((sizeof(struct spdk_nvme_health_information_page) / sizeof(uint32_t) - 1) << 16) |
+		     SPDK_NVME_LOG_HEALTH_INFORMATION;
+	CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
+}
+
+static void verify_error_log_page(struct nvme_request *req)
+{
+	uint32_t temp_cdw10;
+
+	CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE);
+	CU_ASSERT(req->cmd.nsid == SPDK_NVME_GLOBAL_NS_TAG);
+
+	temp_cdw10 = (((sizeof(struct spdk_nvme_error_information_entry) * error_num_entries) /
+		       sizeof(uint32_t) - 1) << 16) | SPDK_NVME_LOG_ERROR;
+	CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
+}
+
+static void verify_set_feature_cmd(struct nvme_request *req)
+{
+	CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_SET_FEATURES);
+	CU_ASSERT(req->cmd.cdw10 == feature);
+	CU_ASSERT(req->cmd.cdw11 == feature_cdw11);
+	CU_ASSERT(req->cmd.cdw12 == feature_cdw12);
+}
+
+static void verify_set_feature_ns_cmd(struct nvme_request *req)
+{
+	CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_SET_FEATURES);
+	CU_ASSERT(req->cmd.cdw10 == expected_feature_cdw10);
+	CU_ASSERT(req->cmd.cdw11 == expected_feature_cdw11);
+	CU_ASSERT(req->cmd.cdw12 == expected_feature_cdw12);
+	CU_ASSERT(req->cmd.nsid == expected_feature_ns);
+}
+
+static void verify_get_feature_cmd(struct nvme_request *req)
+{
+	CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_FEATURES);
+	CU_ASSERT(req->cmd.cdw10 == get_feature);
+	CU_ASSERT(req->cmd.cdw11 == get_feature_cdw11);
+}
+
+static void verify_get_feature_ns_cmd(struct nvme_request *req)
+{
+	CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_FEATURES);
+	CU_ASSERT(req->cmd.cdw10 == expected_feature_cdw10);
+	CU_ASSERT(req->cmd.cdw11 == expected_feature_cdw11);
+	CU_ASSERT(req->cmd.nsid == expected_feature_ns);
+}
+
+static void verify_abort_cmd(struct nvme_request *req)
+{
+	CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_ABORT);
+	/* cdw10: CID in upper 16 bits, SQID in lower 16 bits */
+	CU_ASSERT(req->cmd.cdw10 == (((uint32_t)abort_cid << 16) | abort_sqid));
+}
+
+/*
+ * More verification callbacks: raw passthrough commands (which must be
+ * forwarded untouched — here all-zero), Intel vendor log pages,
+ * namespace management/attachment, format, and firmware commands.
+ */
+static void verify_io_raw_cmd(struct nvme_request *req)
+{
+	struct spdk_nvme_cmd command = {};
+
+	CU_ASSERT(memcmp(&req->cmd, &command, sizeof(req->cmd)) == 0);
+}
+
+static void verify_io_raw_cmd_with_md(struct nvme_request *req)
+{
+	struct spdk_nvme_cmd command = {};
+
+	CU_ASSERT(memcmp(&req->cmd, &command, sizeof(req->cmd)) == 0);
+}
+
+static void verify_intel_smart_log_page(struct nvme_request *req)
+{
+	uint32_t temp_cdw10;
+
+	CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE);
+	CU_ASSERT(req->cmd.nsid == health_log_nsid);
+
+	temp_cdw10 = ((sizeof(struct spdk_nvme_intel_smart_information_page) /
+		       sizeof(uint32_t) - 1) << 16) |
+		     SPDK_NVME_INTEL_LOG_SMART;
+	CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
+}
+
+static void verify_intel_temperature_log_page(struct nvme_request *req)
+{
+	uint32_t temp_cdw10;
+
+	CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE);
+
+	temp_cdw10 = ((sizeof(struct spdk_nvme_intel_temperature_page) / sizeof(uint32_t) - 1) << 16) |
+		     SPDK_NVME_INTEL_LOG_TEMPERATURE;
+	CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
+}
+
+static void verify_intel_read_latency_log_page(struct nvme_request *req)
+{
+	uint32_t temp_cdw10;
+
+	CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE);
+
+	temp_cdw10 = ((sizeof(struct spdk_nvme_intel_rw_latency_page) / sizeof(uint32_t) - 1) << 16) |
+		     SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY;
+	CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
+}
+
+static void verify_intel_write_latency_log_page(struct nvme_request *req)
+{
+	uint32_t temp_cdw10;
+
+	CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE);
+
+	temp_cdw10 = ((sizeof(struct spdk_nvme_intel_rw_latency_page) / sizeof(uint32_t) - 1) << 16) |
+		     SPDK_NVME_INTEL_LOG_WRITE_CMD_LATENCY;
+	CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
+}
+
+static void verify_intel_get_log_page_directory(struct nvme_request *req)
+{
+	uint32_t temp_cdw10;
+
+	CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE);
+
+	temp_cdw10 = ((sizeof(struct spdk_nvme_intel_log_page_directory) / sizeof(uint32_t) - 1) << 16) |
+		     SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY;
+	CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
+}
+
+static void verify_intel_marketing_description_log_page(struct nvme_request *req)
+{
+	uint32_t temp_cdw10;
+
+	CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE);
+
+	temp_cdw10 = ((sizeof(struct spdk_nvme_intel_marketing_description_page) / sizeof(
+			       uint32_t) - 1) << 16) |
+		     SPDK_NVME_INTEL_MARKETING_DESCRIPTION;
+	CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
+}
+
+static void verify_namespace_attach(struct nvme_request *req)
+{
+	CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_NS_ATTACHMENT);
+	CU_ASSERT(req->cmd.cdw10 == SPDK_NVME_NS_CTRLR_ATTACH);
+	CU_ASSERT(req->cmd.nsid == namespace_management_nsid);
+}
+
+static void verify_namespace_detach(struct nvme_request *req)
+{
+	CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_NS_ATTACHMENT);
+	CU_ASSERT(req->cmd.cdw10 == SPDK_NVME_NS_CTRLR_DETACH);
+	CU_ASSERT(req->cmd.nsid == namespace_management_nsid);
+}
+
+static void verify_namespace_create(struct nvme_request *req)
+{
+	CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_NS_MANAGEMENT);
+	CU_ASSERT(req->cmd.cdw10 == SPDK_NVME_NS_MANAGEMENT_CREATE);
+	CU_ASSERT(req->cmd.nsid == 0);
+}
+
+static void verify_namespace_delete(struct nvme_request *req)
+{
+	CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_NS_MANAGEMENT);
+	CU_ASSERT(req->cmd.cdw10 == SPDK_NVME_NS_MANAGEMENT_DELETE);
+	CU_ASSERT(req->cmd.nsid == namespace_management_nsid);
+}
+
+static void verify_format_nvme(struct nvme_request *req)
+{
+	CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_FORMAT_NVM);
+	CU_ASSERT(req->cmd.cdw10 == 0);
+	CU_ASSERT(req->cmd.nsid == format_nvme_nsid);
+}
+
+static void verify_fw_commit(struct nvme_request *req)
+{
+	CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_FIRMWARE_COMMIT);
+	/* 0x09 = commit action 001b (replace and enable) << 3 | firmware slot 1 */
+	CU_ASSERT(req->cmd.cdw10 == 0x09);
+}
+
+static void verify_fw_image_download(struct nvme_request *req)
+{
+	CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD);
+	/* cdw10 = number of dwords minus one, cdw11 = dword offset */
+	CU_ASSERT(req->cmd.cdw10 == (fw_img_size >> 2) - 1);
+	CU_ASSERT(req->cmd.cdw11 == fw_img_offset >> 2);
+}
+
+struct nvme_request *
+nvme_allocate_request_user_copy(struct spdk_nvme_qpair *qpair, void *buffer, uint32_t payload_size,
+				spdk_nvme_cmd_cb cb_fn, void *cb_arg, bool host_to_controller)
+{
+	/* For the unit test, we don't actually need to copy the buffer */
+	return nvme_allocate_request_contig(qpair, buffer, payload_size, cb_fn, cb_arg);
+}
+
+/* Submit stub: hand the built request to the current verify_fn. */
+int
+nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
+{
+	verify_fn(req);
+	/* stop analyzer from thinking stack variable addresses are stored in a global */
+	memset(req, 0, sizeof(*req));
+
+	return 0;
+}
+
+/* Admin-queue submit stub: same validation path as the I/O variant. */
+int
+nvme_ctrlr_submit_admin_request(struct spdk_nvme_ctrlr *ctrlr, struct nvme_request *req)
+{
+	verify_fn(req);
+	/* stop analyzer from thinking stack variable addresses are stored in a global */
+	memset(req, 0, sizeof(*req));
+
+	return 0;
+}
+
+/* Build a stack controller whose admin queue holds one free request. */
+#define DECLARE_AND_CONSTRUCT_CTRLR()	\
+	struct spdk_nvme_ctrlr	ctrlr = {};	\
+	struct spdk_nvme_qpair	adminq = {};	\
+	struct nvme_request	req;		\
+						\
+	STAILQ_INIT(&adminq.free_req);		\
+	STAILQ_INSERT_HEAD(&adminq.free_req, &req, stailq);	\
+	ctrlr.adminq = &adminq;
+
+/*
+ * Get-log-page test drivers: each installs the matching verify_*
+ * callback and issues spdk_nvme_ctrlr_cmd_get_log_page(); validation
+ * happens inside the submit stub.
+ */
+static void
+test_firmware_get_log_page(void)
+{
+	DECLARE_AND_CONSTRUCT_CTRLR();
+	struct spdk_nvme_firmware_page payload = {};
+
+	verify_fn = verify_firmware_log_page;
+
+	spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_LOG_FIRMWARE_SLOT, SPDK_NVME_GLOBAL_NS_TAG,
+					 &payload,
+					 sizeof(payload), 0, NULL, NULL);
+}
+
+static void
+test_health_get_log_page(void)
+{
+	DECLARE_AND_CONSTRUCT_CTRLR();
+	struct spdk_nvme_health_information_page payload = {};
+
+	verify_fn = verify_health_log_page;
+
+	spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_LOG_HEALTH_INFORMATION, health_log_nsid,
+					 &payload,
+					 sizeof(payload), 0, NULL, NULL);
+}
+
+static void
+test_error_get_log_page(void)
+{
+	DECLARE_AND_CONSTRUCT_CTRLR();
+	struct spdk_nvme_error_information_entry payload = {};
+
+	ctrlr.cdata.elpe = CTRLR_CDATA_ELPE;
+
+	verify_fn = verify_error_log_page;
+
+	/* valid page */
+	error_num_entries = 1;
+	spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_LOG_ERROR, SPDK_NVME_GLOBAL_NS_TAG, &payload,
+					 sizeof(payload), 0, NULL, NULL);
+}
+
+static void test_intel_smart_get_log_page(void)
+{
+	DECLARE_AND_CONSTRUCT_CTRLR();
+	struct spdk_nvme_intel_smart_information_page payload = {};
+
+	verify_fn = verify_intel_smart_log_page;
+
+	spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_INTEL_LOG_SMART, health_log_nsid, &payload,
+					 sizeof(payload), 0, NULL, NULL);
+}
+
+static void test_intel_temperature_get_log_page(void)
+{
+	DECLARE_AND_CONSTRUCT_CTRLR();
+	struct spdk_nvme_intel_temperature_page payload = {};
+
+	verify_fn = verify_intel_temperature_log_page;
+
+	spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE, SPDK_NVME_GLOBAL_NS_TAG,
+					 &payload, sizeof(payload), 0, NULL, NULL);
+}
+
+static void test_intel_read_latency_get_log_page(void)
+{
+	DECLARE_AND_CONSTRUCT_CTRLR();
+	struct spdk_nvme_intel_rw_latency_page payload = {};
+
+	verify_fn = verify_intel_read_latency_log_page;
+
+	spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY,
+					 SPDK_NVME_GLOBAL_NS_TAG,
+					 &payload, sizeof(payload), 0, NULL, NULL);
+}
+
+static void test_intel_write_latency_get_log_page(void)
+{
+	DECLARE_AND_CONSTRUCT_CTRLR();
+	struct spdk_nvme_intel_rw_latency_page payload = {};
+
+	verify_fn = verify_intel_write_latency_log_page;
+
+	spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_INTEL_LOG_WRITE_CMD_LATENCY,
+					 SPDK_NVME_GLOBAL_NS_TAG,
+					 &payload, sizeof(payload), 0, NULL, NULL);
+}
+
+static void test_intel_get_log_page_directory(void)
+{
+	DECLARE_AND_CONSTRUCT_CTRLR();
+	struct spdk_nvme_intel_log_page_directory payload = {};
+
+	verify_fn = verify_intel_get_log_page_directory;
+
+	spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY,
+					 SPDK_NVME_GLOBAL_NS_TAG,
+					 &payload, sizeof(payload), 0, NULL, NULL);
+}
+
+static void test_intel_marketing_description_get_log_page(void)
+{
+	DECLARE_AND_CONSTRUCT_CTRLR();
+	struct spdk_nvme_intel_marketing_description_page payload = {};
+
+	verify_fn = verify_intel_marketing_description_log_page;
+
+	spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_INTEL_MARKETING_DESCRIPTION,
+					 SPDK_NVME_GLOBAL_NS_TAG,
+					 &payload, sizeof(payload), 0, NULL, NULL);
+}
+
+static void test_generic_get_log_pages(void)
+{
+	test_error_get_log_page();
+	test_health_get_log_page();
+	test_firmware_get_log_page();
+}
+
+static void test_intel_get_log_pages(void)
+{
+	test_intel_get_log_page_directory();
+	test_intel_smart_get_log_page();
+	test_intel_temperature_get_log_page();
+	test_intel_read_latency_get_log_page();
+	test_intel_write_latency_get_log_page();
+	test_intel_marketing_description_get_log_page();
+}
+
+/*
+ * Feature, abort, and raw-passthrough test drivers. Same pattern as
+ * above: install verify_fn, then invoke the public command builder.
+ */
+static void
+test_set_feature_cmd(void)
+{
+	DECLARE_AND_CONSTRUCT_CTRLR();
+
+	verify_fn = verify_set_feature_cmd;
+
+	spdk_nvme_ctrlr_cmd_set_feature(&ctrlr, feature, feature_cdw11, feature_cdw12, NULL, 0, NULL, NULL);
+}
+
+static void
+test_get_feature_ns_cmd(void)
+{
+	DECLARE_AND_CONSTRUCT_CTRLR();
+
+	verify_fn = verify_get_feature_ns_cmd;
+
+	spdk_nvme_ctrlr_cmd_get_feature_ns(&ctrlr, expected_feature_cdw10,
+					   expected_feature_cdw11, NULL, 0,
+					   NULL, NULL, expected_feature_ns);
+}
+
+static void
+test_set_feature_ns_cmd(void)
+{
+	DECLARE_AND_CONSTRUCT_CTRLR();
+
+	verify_fn = verify_set_feature_ns_cmd;
+
+	spdk_nvme_ctrlr_cmd_set_feature_ns(&ctrlr, expected_feature_cdw10,
+					   expected_feature_cdw11, expected_feature_cdw12,
+					   NULL, 0, NULL, NULL, expected_feature_ns);
+}
+
+static void
+test_get_feature_cmd(void)
+{
+	DECLARE_AND_CONSTRUCT_CTRLR();
+
+	verify_fn = verify_get_feature_cmd;
+
+	spdk_nvme_ctrlr_cmd_get_feature(&ctrlr, get_feature, get_feature_cdw11, NULL, 0, NULL, NULL);
+}
+
+static void
+test_abort_cmd(void)
+{
+	DECLARE_AND_CONSTRUCT_CTRLR();
+	struct spdk_nvme_qpair qpair = {};
+
+	STAILQ_INIT(&ctrlr.queued_aborts);
+
+	verify_fn = verify_abort_cmd;
+
+	qpair.id = abort_sqid;
+	spdk_nvme_ctrlr_cmd_abort(&ctrlr, &qpair, abort_cid, NULL, NULL);
+}
+
+static void
+test_io_raw_cmd(void)
+{
+	DECLARE_AND_CONSTRUCT_CTRLR();
+	struct spdk_nvme_qpair qpair = {};
+	struct spdk_nvme_cmd cmd = {};
+
+	verify_fn = verify_io_raw_cmd;
+
+	spdk_nvme_ctrlr_cmd_io_raw(&ctrlr, &qpair, &cmd, NULL, 1, NULL, NULL);
+}
+
+static void
+test_io_raw_cmd_with_md(void)
+{
+	DECLARE_AND_CONSTRUCT_CTRLR();
+	struct spdk_nvme_qpair qpair = {};
+	struct spdk_nvme_cmd cmd = {};
+
+	verify_fn = verify_io_raw_cmd_with_md;
+
+	spdk_nvme_ctrlr_cmd_io_raw_with_md(&ctrlr, &qpair, &cmd, NULL, 1, NULL, NULL, NULL);
+}
+
+/* Aggregate runner used by the suite: generic plus Intel log pages. */
+static void
+test_get_log_pages(void)
+{
+	test_generic_get_log_pages();
+	test_intel_get_log_pages();
+}
+
+/*
+ * Namespace management, format, and firmware command test drivers.
+ */
+static void
+test_namespace_attach(void)
+{
+	DECLARE_AND_CONSTRUCT_CTRLR();
+	struct spdk_nvme_ctrlr_list payload = {};
+
+	verify_fn = verify_namespace_attach;
+
+	nvme_ctrlr_cmd_attach_ns(&ctrlr, namespace_management_nsid, &payload, NULL, NULL);
+}
+
+static void
+test_namespace_detach(void)
+{
+	DECLARE_AND_CONSTRUCT_CTRLR();
+	struct spdk_nvme_ctrlr_list payload = {};
+
+	verify_fn = verify_namespace_detach;
+
+	nvme_ctrlr_cmd_detach_ns(&ctrlr, namespace_management_nsid, &payload, NULL, NULL);
+}
+
+static void
+test_namespace_create(void)
+{
+	DECLARE_AND_CONSTRUCT_CTRLR();
+	struct spdk_nvme_ns_data payload = {};
+
+	verify_fn = verify_namespace_create;
+	nvme_ctrlr_cmd_create_ns(&ctrlr, &payload, NULL, NULL);
+}
+
+static void
+test_namespace_delete(void)
+{
+	DECLARE_AND_CONSTRUCT_CTRLR();
+
+	verify_fn = verify_namespace_delete;
+	nvme_ctrlr_cmd_delete_ns(&ctrlr, namespace_management_nsid, NULL, NULL);
+}
+
+static void
+test_format_nvme(void)
+{
+	DECLARE_AND_CONSTRUCT_CTRLR();
+	struct spdk_nvme_format format = {};
+
+	verify_fn = verify_format_nvme;
+
+	nvme_ctrlr_cmd_format(&ctrlr, format_nvme_nsid, &format, NULL, NULL);
+}
+
+static void
+test_fw_commit(void)
+{
+	DECLARE_AND_CONSTRUCT_CTRLR();
+	struct spdk_nvme_fw_commit fw_commit = {};
+
+	fw_commit.ca = SPDK_NVME_FW_COMMIT_REPLACE_AND_ENABLE_IMG;
+	fw_commit.fs = 1;
+
+	verify_fn = verify_fw_commit;
+
+	nvme_ctrlr_cmd_fw_commit(&ctrlr, &fw_commit, NULL, NULL);
+}
+
+static void
+test_fw_image_download(void)
+{
+	DECLARE_AND_CONSTRUCT_CTRLR();
+
+	verify_fn = verify_fw_image_download;
+
+	nvme_ctrlr_cmd_fw_image_download(&ctrlr, fw_img_size, fw_img_offset, NULL,
+					 NULL, NULL);
+}
+
+/* Register every nvme_ctrlr_cmd test with CUnit and return the failure count. */
+int main(int argc, char **argv)
+{
+	CU_pSuite suite = NULL;
+	unsigned int num_failures;
+
+	if (CU_initialize_registry() != CUE_SUCCESS) {
+		return CU_get_error();
+	}
+
+	suite = CU_add_suite("nvme_ctrlr_cmd", NULL, NULL);
+	if (suite == NULL) {
+		CU_cleanup_registry();
+		return CU_get_error();
+	}
+
+	if (
+		CU_add_test(suite, "test ctrlr cmd get_log_pages", test_get_log_pages) == NULL
+		|| CU_add_test(suite, "test ctrlr cmd set_feature", test_set_feature_cmd) == NULL
+		|| CU_add_test(suite, "test ctrlr cmd set_feature_ns", test_set_feature_ns_cmd) == NULL
+		|| CU_add_test(suite, "test ctrlr cmd get_feature", test_get_feature_cmd) == NULL
+		|| CU_add_test(suite, "test ctrlr cmd get_feature_ns", test_get_feature_ns_cmd) == NULL
+		|| CU_add_test(suite, "test ctrlr cmd abort_cmd", test_abort_cmd) == NULL
+		|| CU_add_test(suite, "test ctrlr cmd io_raw_cmd", test_io_raw_cmd) == NULL
+		|| CU_add_test(suite, "test ctrlr cmd io_raw_cmd_with_md", test_io_raw_cmd_with_md) == NULL
+		|| CU_add_test(suite, "test ctrlr cmd namespace_attach", test_namespace_attach) == NULL
+		|| CU_add_test(suite, "test ctrlr cmd namespace_detach", test_namespace_detach) == NULL
+		|| CU_add_test(suite, "test ctrlr cmd namespace_create", test_namespace_create) == NULL
+		|| CU_add_test(suite, "test ctrlr cmd namespace_delete", test_namespace_delete) == NULL
+		|| CU_add_test(suite, "test ctrlr cmd format_nvme", test_format_nvme) == NULL
+		|| CU_add_test(suite, "test ctrlr cmd fw_commit", test_fw_commit) == NULL
+		|| CU_add_test(suite, "test ctrlr cmd fw_image_download", test_fw_image_download) == NULL
+	) {
+		CU_cleanup_registry();
+		return CU_get_error();
+	}
+
+	CU_basic_set_mode(CU_BRM_VERBOSE);
+	CU_basic_run_tests();
+	num_failures = CU_get_number_of_failures();
+	CU_cleanup_registry();
+	return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/.gitignore
new file mode 100644
index 00000000..2813105d
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/.gitignore
@@ -0,0 +1 @@
+nvme_ctrlr_ocssd_cmd_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/Makefile
new file mode 100644
index 00000000..9446b8d5
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+# Unit-test build glue: locate the SPDK tree root, name the single test
+# source, and reuse the shared unit-test make rules.
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_ctrlr_ocssd_cmd_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/nvme_ctrlr_ocssd_cmd_ut.c b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/nvme_ctrlr_ocssd_cmd_ut.c
new file mode 100644
index 00000000..98eccf34
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/nvme_ctrlr_ocssd_cmd_ut.c
@@ -0,0 +1,116 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "nvme/nvme_ctrlr_ocssd_cmd.c"
+
/*
 * Build a minimal controller with a single-entry admin free-request queue —
 * just enough state for one command submission inside a test body.
 */
#define DECLARE_AND_CONSTRUCT_CTRLR()	\
	struct spdk_nvme_ctrlr	ctrlr = {};	\
	struct spdk_nvme_qpair	adminq = {};	\
	struct nvme_request	req;		\
						\
	STAILQ_INIT(&adminq.free_req);		\
	STAILQ_INSERT_HEAD(&adminq.free_req, &req, stailq);	\
	ctrlr.adminq = &adminq;

/* PID symbol expected by the SPDK nvme logging/tracing macros. */
pid_t g_spdk_nvme_pid;
/* NOTE(review): g_req appears unused in this file — possibly kept for
 * parity with the other nvme unit tests; confirm before removing. */
struct nvme_request g_req;
/* Per-test hook: each test points verify_fn at a checker that inspects
 * the request built by the code under test. */
typedef void (*verify_request_fn_t)(struct nvme_request *req);
verify_request_fn_t verify_fn;

/* Namespace ID the geometry test submits and the verifier expects back. */
static const uint32_t expected_geometry_ns = 1;
+
/*
 * Stub for the real admin submit path: instead of queueing to hardware,
 * run the test's verify_fn against the built request, then scrub the
 * request so the single free_req entry can be reused cleanly.
 */
int
nvme_ctrlr_submit_admin_request(struct spdk_nvme_ctrlr *ctrlr, struct nvme_request *req)
{
	verify_fn(req);
	memset(req, 0, sizeof(*req));
	return 0;
}

/*
 * Stub that skips the bounce-buffer copy: the tests only inspect the
 * command fields, never the payload contents.
 */
struct nvme_request *
nvme_allocate_request_user_copy(struct spdk_nvme_qpair *qpair, void *buffer, uint32_t payload_size,
				spdk_nvme_cmd_cb cb_fn, void *cb_arg, bool host_to_controller)
{
	/* For the unit test, we don't actually need to copy the buffer */
	return nvme_allocate_request_contig(qpair, buffer, payload_size, cb_fn, cb_arg);
}
+
+static void verify_geometry_cmd(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_OCSSD_OPC_GEOMETRY);
+ CU_ASSERT(req->cmd.nsid == expected_geometry_ns);
+}
+
+static void
+test_geometry_cmd(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ struct spdk_ocssd_geometry_data geo;
+
+ verify_fn = verify_geometry_cmd;
+
+ spdk_nvme_ocssd_ctrlr_cmd_geometry(&ctrlr, expected_geometry_ns, &geo,
+ sizeof(geo), NULL, NULL);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("nvme_ctrlr_cmd", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "test ocssd ctrlr geometry cmd ", test_geometry_cmd) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ns.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_ns.c/.gitignore
new file mode 100644
index 00000000..ada0ec86
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ns.c/.gitignore
@@ -0,0 +1 @@
+nvme_ns_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ns.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_ns.c/Makefile
new file mode 100644
index 00000000..add85ee9
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ns.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
# Locate the SPDK tree root relative to this unit-test directory.
SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)

# Source file compiled into this unit-test binary.
TEST_FILE = nvme_ns_ut.c

# Shared unit-test build rules (compiler flags, CUnit linkage, targets).
include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ns.c/nvme_ns_ut.c b/src/spdk/test/unit/lib/nvme/nvme_ns.c/nvme_ns_ut.c
new file mode 100644
index 00000000..cdfb4951
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ns.c/nvme_ns_ut.c
@@ -0,0 +1,163 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "spdk/env.h"
+
+#include "nvme/nvme_ns.c"
+
+#include "common/lib/test_env.c"
+
SPDK_LOG_REGISTER_COMPONENT("nvme", SPDK_LOG_NVME)

/* Stub: completion polling always reports success (returns 0). */
DEFINE_STUB(spdk_nvme_wait_for_completion_robust_lock, int,
	    (struct spdk_nvme_qpair *qpair,
	     struct nvme_completion_poll_status *status,
	     pthread_mutex_t *robust_mutex), 0);

/* Stub: identify always fails with -1; these tests never need real
 * identify data. */
int
nvme_ctrlr_cmd_identify(struct spdk_nvme_ctrlr *ctrlr, uint8_t cns, uint16_t cntid, uint32_t nsid,
			void *payload, size_t payload_size,
			spdk_nvme_cmd_cb cb_fn, void *cb_arg)
{
	return -1;
}

/* Stub: completion callback is a no-op. */
void
nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl)
{
}

/* Stub: queue processing always fails with -1; never exercised here. */
int32_t
spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
{
	return -1;
}
+
+static void
+test_nvme_ns_construct(void)
+{
+ struct spdk_nvme_ns ns = {};
+ uint32_t id = 1;
+ struct spdk_nvme_ctrlr ctrlr = {};
+
+ nvme_ns_construct(&ns, id, &ctrlr);
+ CU_ASSERT(ns.id == 1);
+}
+
+static void
+test_nvme_ns_uuid(void)
+{
+ struct spdk_nvme_ns ns = {};
+ const struct spdk_uuid *uuid;
+ struct spdk_uuid expected_uuid;
+
+ memset(&expected_uuid, 0xA5, sizeof(expected_uuid));
+
+ /* Empty list - no UUID should be found */
+ memset(ns.id_desc_list, 0, sizeof(ns.id_desc_list));
+ uuid = spdk_nvme_ns_get_uuid(&ns);
+ CU_ASSERT(uuid == NULL);
+
+ /* NGUID only (no UUID in list) */
+ memset(ns.id_desc_list, 0, sizeof(ns.id_desc_list));
+ ns.id_desc_list[0] = 0x02; /* NIDT == NGUID */
+ ns.id_desc_list[1] = 0x10; /* NIDL */
+ memset(&ns.id_desc_list[4], 0xCC, 0x10);
+ uuid = spdk_nvme_ns_get_uuid(&ns);
+ CU_ASSERT(uuid == NULL);
+
+ /* Just UUID in the list */
+ memset(ns.id_desc_list, 0, sizeof(ns.id_desc_list));
+ ns.id_desc_list[0] = 0x03; /* NIDT == UUID */
+ ns.id_desc_list[1] = 0x10; /* NIDL */
+ memcpy(&ns.id_desc_list[4], &expected_uuid, sizeof(expected_uuid));
+ uuid = spdk_nvme_ns_get_uuid(&ns);
+ SPDK_CU_ASSERT_FATAL(uuid != NULL);
+ CU_ASSERT(memcmp(uuid, &expected_uuid, sizeof(*uuid)) == 0);
+
+ /* UUID followed by NGUID */
+ memset(ns.id_desc_list, 0, sizeof(ns.id_desc_list));
+ ns.id_desc_list[0] = 0x03; /* NIDT == UUID */
+ ns.id_desc_list[1] = 0x10; /* NIDL */
+ memcpy(&ns.id_desc_list[4], &expected_uuid, sizeof(expected_uuid));
+ ns.id_desc_list[20] = 0x02; /* NIDT == NGUID */
+ ns.id_desc_list[21] = 0x10; /* NIDL */
+ memset(&ns.id_desc_list[24], 0xCC, 0x10);
+ uuid = spdk_nvme_ns_get_uuid(&ns);
+ SPDK_CU_ASSERT_FATAL(uuid != NULL);
+ CU_ASSERT(memcmp(uuid, &expected_uuid, sizeof(*uuid)) == 0);
+
+ /* NGUID followed by UUID */
+ memset(ns.id_desc_list, 0, sizeof(ns.id_desc_list));
+ ns.id_desc_list[0] = 0x02; /* NIDT == NGUID */
+ ns.id_desc_list[1] = 0x10; /* NIDL */
+ memset(&ns.id_desc_list[4], 0xCC, 0x10);
+ ns.id_desc_list[20] = 0x03; /* NIDT = UUID */
+ ns.id_desc_list[21] = 0x10; /* NIDL */
+ memcpy(&ns.id_desc_list[24], &expected_uuid, sizeof(expected_uuid));
+ uuid = spdk_nvme_ns_get_uuid(&ns);
+ SPDK_CU_ASSERT_FATAL(uuid != NULL);
+ CU_ASSERT(memcmp(uuid, &expected_uuid, sizeof(*uuid)) == 0);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("nvme", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "test_nvme_ns", test_nvme_ns_construct) == NULL ||
+ CU_add_test(suite, "test_nvme_ns_uuid", test_nvme_ns_uuid) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/.gitignore
new file mode 100644
index 00000000..5583ec23
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/.gitignore
@@ -0,0 +1 @@
+nvme_ns_cmd_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/Makefile
new file mode 100644
index 00000000..ff451d72
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
# Locate the SPDK tree root relative to this unit-test directory.
SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)

# Source file compiled into this unit-test binary.
TEST_FILE = nvme_ns_cmd_ut.c

# Shared unit-test build rules (compiler flags, CUnit linkage, targets).
include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/nvme_ns_cmd_ut.c b/src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/nvme_ns_cmd_ut.c
new file mode 100644
index 00000000..f17ffa35
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/nvme_ns_cmd_ut.c
@@ -0,0 +1,1440 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "nvme/nvme_ns_cmd.c"
+#include "nvme/nvme.c"
+
+#include "common/lib/test_env.c"
+
/* Stub: queue processing reports zero completions; never exercised here. */
DEFINE_STUB(spdk_nvme_qpair_process_completions, int32_t,
	    (struct spdk_nvme_qpair *qpair,
	     uint32_t max_completions), 0);

/* NOTE(review): appears unused in this file — nvme.c may require the
 * symbol; confirm before removing. */
static struct nvme_driver _g_nvme_driver = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
};

/* Captures the most recent request handed to nvme_qpair_submit_request()
 * so each test can inspect how the command was built and split. */
static struct nvme_request *g_request = NULL;

/* Stub: PCI enumeration always fails; no real devices in unit tests. */
int
spdk_pci_nvme_enumerate(spdk_pci_enum_cb enum_cb, void *enum_ctx)
{
	return -1;
}
+
/* SGL reset callback: no per-iteration state to reset in these tests. */
static void nvme_request_reset_sgl(void *cb_arg, uint32_t sgl_offset)
{
}

/* SGL iterator callback: reports one element whose length is taken from
 * the uint32_t the test passed as cb_arg. */
static int nvme_request_next_sge(void *cb_arg, void **address, uint32_t *length)
{
	uint32_t *lba_count = cb_arg;

	/*
	 * We need to set address to something here, since the SGL splitting code will
	 * use it to determine PRP compatibility. Just use a rather arbitrary address
	 * for now - these tests will not actually cause data to be read from or written
	 * to this address.
	 */
	*address = (void *)(uintptr_t)0x10000000;
	*length = *lba_count;
	return 0;
}

/* Stub: every transport type is reported as available. */
bool
spdk_nvme_transport_available(enum spdk_nvme_transport_type trtype)
{
	return true;
}
+
/* Stub: never constructs a controller; probe paths are not exercised. */
struct spdk_nvme_ctrlr *nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
		const struct spdk_nvme_ctrlr_opts *opts,
		void *devhandle)
{
	return NULL;
}

/* Stub: nothing to tear down for the fake controllers used here. */
void
nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
{
}

/* Stub: process attachment always succeeds. */
int
nvme_ctrlr_add_process(struct spdk_nvme_ctrlr *ctrlr, void *devhandle)
{
	return 0;
}

/* Stub: controller init always succeeds. */
int
nvme_ctrlr_process_init(struct spdk_nvme_ctrlr *ctrlr)
{
	return 0;
}

/* Stub: failure notification is a no-op. */
void
nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove)
{
}

/* Stub: reports an all-zero PCI address. */
struct spdk_pci_addr
spdk_pci_device_get_addr(struct spdk_pci_device *pci_dev)
{
	struct spdk_pci_addr pci_addr;

	memset(&pci_addr, 0, sizeof(pci_addr));
	return pci_addr;
}

/* Stub: reports an all-ones (0xFF) PCI ID. */
struct spdk_pci_id
spdk_pci_device_get_id(struct spdk_pci_device *pci_dev)
{
	struct spdk_pci_id pci_id;

	memset(&pci_id, 0xFF, sizeof(pci_id));

	return pci_id;
}
+
/* Stub: "default" options are all-zero. NOTE(review): ignores opts_size
 * and clears the full struct — fine for tests, but confirm callers never
 * pass a short buffer. */
void
spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
{
	memset(opts, 0, sizeof(*opts));
}

/* Pass-through getter so the code under test sees the ns configured by
 * prepare_for_test(). */
uint32_t
spdk_nvme_ns_get_sector_size(struct spdk_nvme_ns *ns)
{
	return ns->sector_size;
}

/* Pass-through getter for the controller's max transfer size. */
uint32_t
spdk_nvme_ns_get_max_io_xfer_size(struct spdk_nvme_ns *ns)
{
	return ns->ctrlr->max_xfer_size;
}

/* Capture point: instead of submitting to hardware, stash the request so
 * the test can inspect the built command and its children. */
int
nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
{
	g_request = req;

	return 0;
}

/* Stub: reference counting is a no-op in these tests. */
void
nvme_ctrlr_proc_get_ref(struct spdk_nvme_ctrlr *ctrlr)
{
	return;
}

/* Stub: reference counting is a no-op in these tests. */
void
nvme_ctrlr_proc_put_ref(struct spdk_nvme_ctrlr *ctrlr)
{
	return;
}

/* Stub: reference count always reads as zero. */
int
nvme_ctrlr_get_ref_count(struct spdk_nvme_ctrlr *ctrlr)
{
	return 0;
}

/* Stub: transport scan finds nothing and succeeds. */
int
nvme_transport_ctrlr_scan(const struct spdk_nvme_transport_id *trid,
			  void *cb_ctx,
			  spdk_nvme_probe_cb probe_cb,
			  spdk_nvme_remove_cb remove_cb,
			  bool direct_connect)
{
	return 0;
}
+
/*
 * Set up a fake ns/ctrlr/qpair tuple for one test:
 * - ctrlr gets the given max transfer size, 4 KB pages, and cleared flags;
 * - ns gets sector/metadata sizes, optional extended-LBA layout, and the
 *   derived per-max-I/O and per-stripe sector counts;
 * - qpair gets a pool of 32 zeroed requests on its free list;
 * - g_request (the submit-capture slot) is cleared.
 */
static void
prepare_for_test(struct spdk_nvme_ns *ns, struct spdk_nvme_ctrlr *ctrlr,
		 struct spdk_nvme_qpair *qpair,
		 uint32_t sector_size, uint32_t md_size, uint32_t max_xfer_size,
		 uint32_t stripe_size, bool extended_lba)
{
	uint32_t num_requests = 32;
	uint32_t i;

	ctrlr->max_xfer_size = max_xfer_size;
	/*
	 * Clear the flags field - we especially want to make sure the SGL_SUPPORTED flag is not set
	 * so that we test the SGL splitting path.
	 */
	ctrlr->flags = 0;
	ctrlr->min_page_size = 4096;
	ctrlr->page_size = 4096;
	memset(&ctrlr->opts, 0, sizeof(ctrlr->opts));
	memset(ns, 0, sizeof(*ns));
	ns->ctrlr = ctrlr;
	ns->sector_size = sector_size;
	ns->extended_lba_size = sector_size;
	if (extended_lba) {
		ns->flags |= SPDK_NVME_NS_EXTENDED_LBA_SUPPORTED;
		ns->extended_lba_size += md_size;
	}
	ns->md_size = md_size;
	ns->sectors_per_max_io = spdk_nvme_ns_get_max_io_xfer_size(ns) / ns->extended_lba_size;
	ns->sectors_per_stripe = stripe_size / ns->extended_lba_size;

	/* memset leaves the STAILQ head all-NULL, which STAILQ_INSERT_HEAD
	 * handles; no explicit STAILQ_INIT needed. */
	memset(qpair, 0, sizeof(*qpair));
	qpair->ctrlr = ctrlr;
	qpair->req_buf = calloc(num_requests, sizeof(struct nvme_request));
	SPDK_CU_ASSERT_FATAL(qpair->req_buf != NULL);

	for (i = 0; i < num_requests; i++) {
		/* NOTE(review): byte-offset arithmetic — this assumes req_buf
		 * is a void/char pointer so '+' advances by bytes; confirm
		 * against struct spdk_nvme_qpair's declaration. */
		struct nvme_request *req = qpair->req_buf + i * sizeof(struct nvme_request);

		STAILQ_INSERT_HEAD(&qpair->free_req, req, stailq);
	}

	g_request = NULL;
}

/* Release the request pool allocated by prepare_for_test(). */
static void
cleanup_after_test(struct spdk_nvme_qpair *qpair)
{
	free(qpair->req_buf);
}
+
+static void
+nvme_cmd_interpret_rw(const struct spdk_nvme_cmd *cmd,
+ uint64_t *lba, uint32_t *num_blocks)
+{
+ *lba = *(const uint64_t *)&cmd->cdw10;
+ *num_blocks = (cmd->cdw12 & 0xFFFFu) + 1;
+}
+
+static void
+split_test(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_qpair qpair;
+ struct spdk_nvme_ctrlr ctrlr;
+ void *payload;
+ uint64_t lba, cmd_lba;
+ uint32_t lba_count, cmd_lba_count;
+ int rc;
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+ payload = malloc(512);
+ lba = 0;
+ lba_count = 1;
+
+ rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+
+ CU_ASSERT(g_request->num_children == 0);
+ nvme_cmd_interpret_rw(&g_request->cmd, &cmd_lba, &cmd_lba_count);
+ CU_ASSERT(cmd_lba == lba);
+ CU_ASSERT(cmd_lba_count == lba_count);
+
+ free(payload);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
+static void
+split_test2(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ struct nvme_request *child;
+ void *payload;
+ uint64_t lba, cmd_lba;
+ uint32_t lba_count, cmd_lba_count;
+ int rc;
+
+ /*
+ * Controller has max xfer of 128 KB (256 blocks).
+ * Submit an I/O of 256 KB starting at LBA 0, which should be split
+ * on the max I/O boundary into two I/Os of 128 KB.
+ */
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+ payload = malloc(256 * 1024);
+ lba = 0;
+ lba_count = (256 * 1024) / 512;
+
+ rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+
+ CU_ASSERT(g_request->num_children == 2);
+
+ child = TAILQ_FIRST(&g_request->children);
+ nvme_request_remove_child(g_request, child);
+ nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
+ CU_ASSERT(child->num_children == 0);
+ CU_ASSERT(child->payload_size == 128 * 1024);
+ CU_ASSERT(cmd_lba == 0);
+ CU_ASSERT(cmd_lba_count == 256); /* 256 * 512 byte blocks = 128 KB */
+ nvme_free_request(child);
+
+ child = TAILQ_FIRST(&g_request->children);
+ nvme_request_remove_child(g_request, child);
+ nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
+ CU_ASSERT(child->num_children == 0);
+ CU_ASSERT(child->payload_size == 128 * 1024);
+ CU_ASSERT(cmd_lba == 256);
+ CU_ASSERT(cmd_lba_count == 256);
+ nvme_free_request(child);
+
+ CU_ASSERT(TAILQ_EMPTY(&g_request->children));
+
+ free(payload);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
+static void
+split_test3(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ struct nvme_request *child;
+ void *payload;
+ uint64_t lba, cmd_lba;
+ uint32_t lba_count, cmd_lba_count;
+ int rc;
+
+ /*
+ * Controller has max xfer of 128 KB (256 blocks).
+ * Submit an I/O of 256 KB starting at LBA 10, which should be split
+ * into two I/Os:
+ * 1) LBA = 10, count = 256 blocks
+ * 2) LBA = 266, count = 256 blocks
+ */
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+ payload = malloc(256 * 1024);
+ lba = 10; /* Start at an LBA that isn't aligned to the stripe size */
+ lba_count = (256 * 1024) / 512;
+
+ rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+
+ child = TAILQ_FIRST(&g_request->children);
+ nvme_request_remove_child(g_request, child);
+ nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
+ CU_ASSERT(child->num_children == 0);
+ CU_ASSERT(child->payload_size == 128 * 1024);
+ CU_ASSERT(cmd_lba == 10);
+ CU_ASSERT(cmd_lba_count == 256);
+ nvme_free_request(child);
+
+ child = TAILQ_FIRST(&g_request->children);
+ nvme_request_remove_child(g_request, child);
+ nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
+ CU_ASSERT(child->num_children == 0);
+ CU_ASSERT(child->payload_size == 128 * 1024);
+ CU_ASSERT(cmd_lba == 266);
+ CU_ASSERT(cmd_lba_count == 256);
+ nvme_free_request(child);
+
+ CU_ASSERT(TAILQ_EMPTY(&g_request->children));
+
+ free(payload);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
+static void
+split_test4(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ struct nvme_request *child;
+ void *payload;
+ uint64_t lba, cmd_lba;
+ uint32_t lba_count, cmd_lba_count;
+ int rc;
+
+ /*
+ * Controller has max xfer of 128 KB (256 blocks) and a stripe size of 128 KB.
+ * (Same as split_test3 except with driver-assisted striping enabled.)
+ * Submit an I/O of 256 KB starting at LBA 10, which should be split
+ * into three I/Os:
+ * 1) LBA = 10, count = 246 blocks (less than max I/O size to align to stripe size)
+ * 2) LBA = 256, count = 256 blocks (aligned to stripe size and max I/O size)
+ * 3) LBA = 512, count = 10 blocks (finish off the remaining I/O size)
+ */
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 128 * 1024, false);
+ payload = malloc(256 * 1024);
+ lba = 10; /* Start at an LBA that isn't aligned to the stripe size */
+ lba_count = (256 * 1024) / 512;
+
+ rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL,
+ SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 3);
+
+ child = TAILQ_FIRST(&g_request->children);
+ nvme_request_remove_child(g_request, child);
+ nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
+ CU_ASSERT(child->num_children == 0);
+ CU_ASSERT(child->payload_size == (256 - 10) * 512);
+ CU_ASSERT(child->payload_offset == 0);
+ CU_ASSERT(cmd_lba == 10);
+ CU_ASSERT(cmd_lba_count == 256 - 10);
+ CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS) != 0);
+ CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_LIMITED_RETRY) == 0);
+ nvme_free_request(child);
+
+ child = TAILQ_FIRST(&g_request->children);
+ nvme_request_remove_child(g_request, child);
+ nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
+ CU_ASSERT(child->num_children == 0);
+ CU_ASSERT(child->payload_size == 128 * 1024);
+ CU_ASSERT(child->payload_offset == (256 - 10) * 512);
+ CU_ASSERT(cmd_lba == 256);
+ CU_ASSERT(cmd_lba_count == 256);
+ CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS) != 0);
+ CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_LIMITED_RETRY) == 0);
+ nvme_free_request(child);
+
+ child = TAILQ_FIRST(&g_request->children);
+ nvme_request_remove_child(g_request, child);
+ nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
+ CU_ASSERT(child->num_children == 0);
+ CU_ASSERT(child->payload_size == 10 * 512);
+ CU_ASSERT(child->payload_offset == (512 - 10) * 512);
+ CU_ASSERT(cmd_lba == 512);
+ CU_ASSERT(cmd_lba_count == 10);
+ CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS) != 0);
+ CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_LIMITED_RETRY) == 0);
+ nvme_free_request(child);
+
+ CU_ASSERT(TAILQ_EMPTY(&g_request->children));
+
+ free(payload);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_cmd_child_request(void)
+{
+
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ int rc = 0;
+ struct nvme_request *child, *tmp;
+ void *payload;
+ uint64_t lba = 0x1000;
+ uint32_t i = 0;
+ uint32_t offset = 0;
+ uint32_t sector_size = 512;
+ uint32_t max_io_size = 128 * 1024;
+ uint32_t sectors_per_max_io = max_io_size / sector_size;
+
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_io_size, 0, false);
+
+ payload = malloc(128 * 1024);
+ rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, sectors_per_max_io, NULL, NULL, 0);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->payload_offset == 0);
+ CU_ASSERT(g_request->num_children == 0);
+ nvme_free_request(g_request);
+
+ rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, sectors_per_max_io - 1, NULL, NULL, 0);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->payload_offset == 0);
+ CU_ASSERT(g_request->num_children == 0);
+ nvme_free_request(g_request);
+
+ rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, sectors_per_max_io * 4, NULL, NULL, 0);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->num_children == 4);
+
+ rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, (DEFAULT_IO_QUEUE_REQUESTS + 1) * sector_size,
+ NULL,
+ NULL, 0);
+ SPDK_CU_ASSERT_FATAL(rc == -EINVAL);
+
+ TAILQ_FOREACH_SAFE(child, &g_request->children, child_tailq, tmp) {
+ nvme_request_remove_child(g_request, child);
+ CU_ASSERT(child->payload_offset == offset);
+ CU_ASSERT(child->cmd.opc == SPDK_NVME_OPC_READ);
+ CU_ASSERT(child->cmd.nsid == ns.id);
+ CU_ASSERT(child->cmd.cdw10 == (lba + sectors_per_max_io * i));
+ CU_ASSERT(child->cmd.cdw12 == ((sectors_per_max_io - 1) | 0));
+ offset += max_io_size;
+ nvme_free_request(child);
+ i++;
+ }
+
+ free(payload);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ns_cmd_flush(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ spdk_nvme_cmd_cb cb_fn = NULL;
+ void *cb_arg = NULL;
+ int rc;
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+
+ rc = spdk_nvme_ns_cmd_flush(&ns, &qpair, cb_fn, cb_arg);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_FLUSH);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ns_cmd_write_zeroes(void)
+{
+ struct spdk_nvme_ns ns = { 0 };
+ struct spdk_nvme_ctrlr ctrlr = { 0 };
+ struct spdk_nvme_qpair qpair;
+ spdk_nvme_cmd_cb cb_fn = NULL;
+ void *cb_arg = NULL;
+ uint64_t cmd_lba;
+ uint32_t cmd_lba_count;
+ int rc;
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+
+ rc = spdk_nvme_ns_cmd_write_zeroes(&ns, &qpair, 0, 2, cb_fn, cb_arg, 0);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_WRITE_ZEROES);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ nvme_cmd_interpret_rw(&g_request->cmd, &cmd_lba, &cmd_lba_count);
+ CU_ASSERT_EQUAL(cmd_lba, 0);
+ CU_ASSERT_EQUAL(cmd_lba_count, 2);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
+/*
+ * Verify Dataset Management (TRIM) command construction: opcode, nsid,
+ * 0-based range count in cdw10, attribute bits in cdw11, and that an
+ * empty range list is rejected.
+ */
+static void
+test_nvme_ns_cmd_dataset_management(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ spdk_nvme_cmd_cb cb_fn = NULL;
+ void *cb_arg = NULL;
+ struct spdk_nvme_dsm_range ranges[256];
+ uint16_t i;
+ int rc = 0;
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+
+ for (i = 0; i < 256; i++) {
+ ranges[i].starting_lba = i;
+ ranges[i].length = 1;
+ ranges[i].attributes.raw = 0;
+ }
+
+ /* TRIM one LBA */
+ rc = spdk_nvme_ns_cmd_dataset_management(&ns, &qpair, SPDK_NVME_DSM_ATTR_DEALLOCATE,
+ ranges, 1, cb_fn, cb_arg);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_DATASET_MANAGEMENT);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ CU_ASSERT(g_request->cmd.cdw10 == 0); /* NR field is 0-based: one range encodes as 0 */
+ CU_ASSERT(g_request->cmd.cdw11 == SPDK_NVME_DSM_ATTR_DEALLOCATE);
+ spdk_dma_free(g_request->payload.contig_or_cb_arg); /* range list was copied into a DMA buffer owned by the request */
+ nvme_free_request(g_request);
+
+ /* TRIM 256 LBAs */
+ rc = spdk_nvme_ns_cmd_dataset_management(&ns, &qpair, SPDK_NVME_DSM_ATTR_DEALLOCATE,
+ ranges, 256, cb_fn, cb_arg);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_DATASET_MANAGEMENT);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ CU_ASSERT(g_request->cmd.cdw10 == 255u); /* 256 ranges -> 255 (0-based) */
+ CU_ASSERT(g_request->cmd.cdw11 == SPDK_NVME_DSM_ATTR_DEALLOCATE);
+ spdk_dma_free(g_request->payload.contig_or_cb_arg);
+ nvme_free_request(g_request);
+
+ /* Zero ranges is invalid: the call must fail and build no request */
+ rc = spdk_nvme_ns_cmd_dataset_management(&ns, &qpair, SPDK_NVME_DSM_ATTR_DEALLOCATE,
+ NULL, 0, cb_fn, cb_arg);
+ CU_ASSERT(rc != 0);
+ cleanup_after_test(&qpair);
+}
+
+/*
+ * Verify that a scatter-gather read builds an SGL-type payload carrying the
+ * caller's reset/next-SGE callbacks and cb_arg, and that a NULL next_sge_fn
+ * is rejected.
+ */
+static void
+test_nvme_ns_cmd_readv(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ int rc = 0;
+ void *cb_arg;
+ uint32_t lba_count = 256;
+ uint32_t sector_size = 512;
+ uint64_t sge_length = lba_count * sector_size;
+
+ cb_arg = malloc(512); /* NOTE(review): malloc result not checked before use */
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, 128 * 1024, 0, false);
+ rc = spdk_nvme_ns_cmd_readv(&ns, &qpair, 0x1000, lba_count, NULL, &sge_length, 0,
+ nvme_request_reset_sgl, nvme_request_next_sge);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_READ);
+ CU_ASSERT(nvme_payload_type(&g_request->payload) == NVME_PAYLOAD_TYPE_SGL);
+ CU_ASSERT(g_request->payload.reset_sgl_fn == nvme_request_reset_sgl);
+ CU_ASSERT(g_request->payload.next_sge_fn == nvme_request_next_sge);
+ CU_ASSERT(g_request->payload.contig_or_cb_arg == &sge_length);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+
+ /* Missing next_sge_fn must be rejected */
+ rc = spdk_nvme_ns_cmd_readv(&ns, &qpair, 0x1000, 256, NULL, cb_arg, 0, nvme_request_reset_sgl,
+ NULL);
+ CU_ASSERT(rc != 0);
+
+ free(cb_arg);
+ nvme_free_request(g_request); /* frees the request from the first, successful call */
+ cleanup_after_test(&qpair);
+}
+
+/*
+ * Verify that a scatter-gather write builds an SGL-type payload carrying the
+ * caller's reset/next-SGE callbacks and cb_arg, and that a NULL reset_sgl_fn
+ * is rejected.
+ */
+static void
+test_nvme_ns_cmd_writev(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ int rc = 0;
+ void *cb_arg;
+ uint32_t lba_count = 256;
+ uint32_t sector_size = 512;
+ uint64_t sge_length = lba_count * sector_size;
+
+ cb_arg = malloc(512); /* NOTE(review): malloc result not checked before use */
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, 128 * 1024, 0, false);
+ rc = spdk_nvme_ns_cmd_writev(&ns, &qpair, 0x1000, lba_count, NULL, &sge_length, 0,
+ nvme_request_reset_sgl, nvme_request_next_sge);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_WRITE);
+ CU_ASSERT(nvme_payload_type(&g_request->payload) == NVME_PAYLOAD_TYPE_SGL);
+ CU_ASSERT(g_request->payload.reset_sgl_fn == nvme_request_reset_sgl);
+ CU_ASSERT(g_request->payload.next_sge_fn == nvme_request_next_sge);
+ CU_ASSERT(g_request->payload.contig_or_cb_arg == &sge_length);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+
+ /* Missing reset_sgl_fn must be rejected */
+ rc = spdk_nvme_ns_cmd_writev(&ns, &qpair, 0x1000, 256, NULL, cb_arg, 0,
+ NULL, nvme_request_next_sge);
+ CU_ASSERT(rc != 0);
+
+ free(cb_arg);
+ nvme_free_request(g_request); /* frees the request from the first, successful call */
+ cleanup_after_test(&qpair);
+}
+
+/*
+ * Verify that a scatter-gather compare builds an SGL-type payload carrying
+ * the caller's reset/next-SGE callbacks and cb_arg, and that a NULL
+ * next_sge_fn is rejected.
+ */
+static void
+test_nvme_ns_cmd_comparev(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ int rc = 0;
+ void *cb_arg;
+ uint32_t lba_count = 256;
+ uint32_t sector_size = 512;
+ uint64_t sge_length = lba_count * sector_size;
+
+ cb_arg = malloc(512); /* NOTE(review): malloc result not checked before use */
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, 128 * 1024, 0, false);
+ rc = spdk_nvme_ns_cmd_comparev(&ns, &qpair, 0x1000, lba_count, NULL, &sge_length, 0,
+ nvme_request_reset_sgl, nvme_request_next_sge);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_COMPARE);
+ CU_ASSERT(nvme_payload_type(&g_request->payload) == NVME_PAYLOAD_TYPE_SGL);
+ CU_ASSERT(g_request->payload.reset_sgl_fn == nvme_request_reset_sgl);
+ CU_ASSERT(g_request->payload.next_sge_fn == nvme_request_next_sge);
+ CU_ASSERT(g_request->payload.contig_or_cb_arg == &sge_length);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+
+ /* Missing next_sge_fn must be rejected */
+ rc = spdk_nvme_ns_cmd_comparev(&ns, &qpair, 0x1000, 256, NULL, cb_arg, 0,
+ nvme_request_reset_sgl, NULL);
+ CU_ASSERT(rc != 0);
+
+ free(cb_arg);
+ nvme_free_request(g_request); /* frees the request from the first, successful call */
+ cleanup_after_test(&qpair);
+}
+
+/*
+ * Verify that per-I/O flags (FUA, limited retry) are propagated into cdw12
+ * independently: each read carries exactly the flag that was requested.
+ */
+static void
+test_io_flags(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ void *payload;
+ uint64_t lba;
+ uint32_t lba_count;
+ int rc;
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 128 * 1024, false);
+ payload = malloc(256 * 1024); /* NOTE(review): malloc result not checked before use */
+ lba = 0;
+ lba_count = (4 * 1024) / 512; /* one 4 KB I/O in 512-byte blocks */
+
+ rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL,
+ SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT((g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS) != 0);
+ CU_ASSERT((g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_LIMITED_RETRY) == 0);
+ nvme_free_request(g_request);
+
+ rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL,
+ SPDK_NVME_IO_FLAGS_LIMITED_RETRY);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT((g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS) == 0);
+ CU_ASSERT((g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_LIMITED_RETRY) != 0);
+ nvme_free_request(g_request);
+
+ free(payload);
+ cleanup_after_test(&qpair);
+}
+
+/*
+ * Verify Reservation Register command construction: opcode, nsid, and the
+ * cdw10 encoding (action in bits 2:0, IEKEY in bit 3, CPTPL in bits 31:30).
+ */
+static void
+test_nvme_ns_cmd_reservation_register(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ struct spdk_nvme_reservation_register_data *payload;
+ bool ignore_key = 1;
+ spdk_nvme_cmd_cb cb_fn = NULL;
+ void *cb_arg = NULL;
+ int rc = 0;
+ uint32_t tmp_cdw10;
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+ payload = malloc(sizeof(struct spdk_nvme_reservation_register_data)); /* NOTE(review): malloc result not checked */
+
+ rc = spdk_nvme_ns_cmd_reservation_register(&ns, &qpair, payload, ignore_key,
+ SPDK_NVME_RESERVE_REGISTER_KEY,
+ SPDK_NVME_RESERVE_PTPL_NO_CHANGES,
+ cb_fn, cb_arg);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_RESERVATION_REGISTER);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+
+ /* Rebuild the expected cdw10 encoding independently and compare */
+ tmp_cdw10 = SPDK_NVME_RESERVE_REGISTER_KEY;
+ tmp_cdw10 |= ignore_key ? 1 << 3 : 0;
+ tmp_cdw10 |= (uint32_t)SPDK_NVME_RESERVE_PTPL_NO_CHANGES << 30;
+
+ CU_ASSERT(g_request->cmd.cdw10 == tmp_cdw10);
+
+ spdk_dma_free(g_request->payload.contig_or_cb_arg); /* payload was copied into a request-owned DMA buffer */
+ nvme_free_request(g_request);
+ free(payload);
+ cleanup_after_test(&qpair);
+}
+
+/*
+ * Verify Reservation Release command construction: opcode, nsid, and the
+ * cdw10 encoding (action in bits 2:0, IEKEY in bit 3, type in bits 15:8).
+ */
+static void
+test_nvme_ns_cmd_reservation_release(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ struct spdk_nvme_reservation_key_data *payload;
+ bool ignore_key = 1;
+ spdk_nvme_cmd_cb cb_fn = NULL;
+ void *cb_arg = NULL;
+ int rc = 0;
+ uint32_t tmp_cdw10;
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+ payload = malloc(sizeof(struct spdk_nvme_reservation_key_data)); /* NOTE(review): malloc result not checked */
+
+ rc = spdk_nvme_ns_cmd_reservation_release(&ns, &qpair, payload, ignore_key,
+ SPDK_NVME_RESERVE_RELEASE,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE,
+ cb_fn, cb_arg);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_RESERVATION_RELEASE);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+
+ /* Rebuild the expected cdw10 encoding independently and compare */
+ tmp_cdw10 = SPDK_NVME_RESERVE_RELEASE;
+ tmp_cdw10 |= ignore_key ? 1 << 3 : 0;
+ tmp_cdw10 |= (uint32_t)SPDK_NVME_RESERVE_WRITE_EXCLUSIVE << 8;
+
+ CU_ASSERT(g_request->cmd.cdw10 == tmp_cdw10);
+
+ spdk_dma_free(g_request->payload.contig_or_cb_arg); /* payload was copied into a request-owned DMA buffer */
+ nvme_free_request(g_request);
+ free(payload);
+ cleanup_after_test(&qpair);
+}
+
+/*
+ * Verify Reservation Acquire command construction: opcode, nsid, and the
+ * cdw10 encoding (action in bits 2:0, IEKEY in bit 3, type in bits 15:8).
+ */
+static void
+test_nvme_ns_cmd_reservation_acquire(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ struct spdk_nvme_reservation_acquire_data *payload;
+ bool ignore_key = 1;
+ spdk_nvme_cmd_cb cb_fn = NULL;
+ void *cb_arg = NULL;
+ int rc = 0;
+ uint32_t tmp_cdw10;
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+ payload = malloc(sizeof(struct spdk_nvme_reservation_acquire_data)); /* NOTE(review): malloc result not checked */
+
+ rc = spdk_nvme_ns_cmd_reservation_acquire(&ns, &qpair, payload, ignore_key,
+ SPDK_NVME_RESERVE_ACQUIRE,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE,
+ cb_fn, cb_arg);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_RESERVATION_ACQUIRE);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+
+ /* Rebuild the expected cdw10 encoding independently and compare */
+ tmp_cdw10 = SPDK_NVME_RESERVE_ACQUIRE;
+ tmp_cdw10 |= ignore_key ? 1 << 3 : 0;
+ tmp_cdw10 |= (uint32_t)SPDK_NVME_RESERVE_WRITE_EXCLUSIVE << 8;
+
+ CU_ASSERT(g_request->cmd.cdw10 == tmp_cdw10);
+
+ spdk_dma_free(g_request->payload.contig_or_cb_arg); /* payload was copied into a request-owned DMA buffer */
+ nvme_free_request(g_request);
+ free(payload);
+ cleanup_after_test(&qpair);
+}
+
+/*
+ * Verify Reservation Report command construction: opcode, nsid, and that
+ * cdw10 carries the transfer size expressed in dwords (size / 4).
+ */
+static void
+test_nvme_ns_cmd_reservation_report(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ struct spdk_nvme_reservation_status_data *payload;
+ spdk_nvme_cmd_cb cb_fn = NULL;
+ void *cb_arg = NULL;
+ int rc = 0;
+ uint32_t size = sizeof(struct spdk_nvme_reservation_status_data);
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+
+ payload = calloc(1, size);
+ SPDK_CU_ASSERT_FATAL(payload != NULL);
+
+ rc = spdk_nvme_ns_cmd_reservation_report(&ns, &qpair, payload, size, cb_fn, cb_arg);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_RESERVATION_REPORT);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+
+ CU_ASSERT(g_request->cmd.cdw10 == (size / 4)); /* size is encoded in dwords */
+
+ spdk_dma_free(g_request->payload.contig_or_cb_arg); /* request owns a DMA copy of the buffer */
+ nvme_free_request(g_request);
+ free(payload);
+ cleanup_after_test(&qpair);
+}
+
+/*
+ * Exercise spdk_nvme_ns_cmd_write_with_md() across several namespace
+ * formats: separate metadata buffer vs. extended LBA, with and without
+ * protection information + PRACT. Each case checks whether the request is
+ * split into children and, if so, the child payload offsets/sizes and
+ * metadata offsets at the split boundaries.
+ */
+static void
+test_nvme_ns_cmd_write_with_md(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ int rc = 0;
+ char *buffer = NULL;
+ char *metadata = NULL;
+ uint32_t block_size, md_size;
+ struct nvme_request *child0, *child1;
+
+ block_size = 512;
+ md_size = 128;
+
+ /* Buffers sized for the largest case below (384 blocks) */
+ buffer = malloc((block_size + md_size) * 384);
+ SPDK_CU_ASSERT_FATAL(buffer != NULL);
+ metadata = malloc(md_size * 384);
+ SPDK_CU_ASSERT_FATAL(metadata != NULL);
+
+ /*
+ * 512 byte data + 128 byte metadata
+ * Separate metadata buffer
+ * Max data transfer size 128 KB
+ * No stripe size
+ *
+ * 256 blocks * 512 bytes per block = single 128 KB I/O (no splitting required)
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, false);
+
+ rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, metadata, 0x1000, 256, NULL, NULL, 0, 0,
+ 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->payload.md == metadata);
+ CU_ASSERT(g_request->payload_size == 256 * 512);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 128 byte metadata
+ * Extended LBA
+ * Max data transfer size 128 KB
+ * No stripe size
+ *
+ * 256 blocks * (512 + 128) bytes per block = two I/Os:
+ * child 0: 204 blocks - 204 * (512 + 128) = 127.5 KB
+ * child 1: 52 blocks
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, true);
+
+ rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256, NULL, NULL, 0, 0,
+ 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+ child0 = TAILQ_FIRST(&g_request->children);
+
+ SPDK_CU_ASSERT_FATAL(child0 != NULL);
+ CU_ASSERT(child0->payload.md == NULL);
+ CU_ASSERT(child0->payload_offset == 0);
+ CU_ASSERT(child0->payload_size == 204 * (512 + 128));
+ child1 = TAILQ_NEXT(child0, child_tailq);
+
+ SPDK_CU_ASSERT_FATAL(child1 != NULL);
+ CU_ASSERT(child1->payload.md == NULL);
+ CU_ASSERT(child1->payload_offset == 204 * (512 + 128));
+ CU_ASSERT(child1->payload_size == 52 * (512 + 128));
+
+ nvme_request_free_children(g_request);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 8 byte metadata
+ * Extended LBA
+ * Max data transfer size 128 KB
+ * No stripe size
+ * No protection information
+ *
+ * 256 blocks * (512 + 8) bytes per block = two I/Os:
+ * child 0: 252 blocks - 252 * (512 + 8) = 127.96875 KB
+ * child 1: 4 blocks
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, true);
+
+ rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256, NULL, NULL, 0, 0,
+ 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+ child0 = TAILQ_FIRST(&g_request->children);
+
+ SPDK_CU_ASSERT_FATAL(child0 != NULL);
+ CU_ASSERT(child0->payload.md == NULL);
+ CU_ASSERT(child0->payload_offset == 0);
+ CU_ASSERT(child0->payload_size == 252 * (512 + 8));
+ child1 = TAILQ_NEXT(child0, child_tailq);
+
+ SPDK_CU_ASSERT_FATAL(child1 != NULL);
+ CU_ASSERT(child1->payload.md == NULL);
+ CU_ASSERT(child1->payload_offset == 252 * (512 + 8));
+ CU_ASSERT(child1->payload_size == 4 * (512 + 8));
+
+ nvme_request_free_children(g_request);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 8 byte metadata
+ * Extended LBA
+ * Max data transfer size 128 KB
+ * No stripe size
+ * Protection information enabled + PRACT
+ *
+ * Special case for 8-byte metadata + PI + PRACT: no metadata transferred
+ * In theory, 256 blocks * 512 bytes per block = one I/O (128 KB)
+ * However, the splitting code does not account for PRACT when calculating
+ * max sectors per transfer, so we actually get two I/Os:
+ * child 0: 252 blocks
+ * child 1: 4 blocks
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, true);
+ ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;
+
+ rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256, NULL, NULL,
+ SPDK_NVME_IO_FLAGS_PRACT, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+ child0 = TAILQ_FIRST(&g_request->children);
+
+ SPDK_CU_ASSERT_FATAL(child0 != NULL);
+ CU_ASSERT(child0->payload_offset == 0);
+ CU_ASSERT(child0->payload_size == 252 * 512); /* NOTE: does not include metadata! */
+ child1 = TAILQ_NEXT(child0, child_tailq);
+
+ SPDK_CU_ASSERT_FATAL(child1 != NULL);
+ CU_ASSERT(child1->payload.md == NULL);
+ CU_ASSERT(child1->payload_offset == 252 * 512);
+ CU_ASSERT(child1->payload_size == 4 * 512);
+
+ nvme_request_free_children(g_request);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 8 byte metadata
+ * Separate metadata buffer
+ * Max data transfer size 128 KB
+ * No stripe size
+ * Protection information enabled + PRACT
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, false);
+ ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;
+
+ rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, metadata, 0x1000, 256, NULL, NULL,
+ SPDK_NVME_IO_FLAGS_PRACT, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->payload.md == metadata);
+ CU_ASSERT(g_request->payload_size == 256 * 512);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 8 byte metadata
+ * Separate metadata buffer
+ * Max data transfer size 128 KB
+ * No stripe size
+ * Protection information enabled + PRACT
+ *
+ * 384 blocks * 512 bytes = two I/Os:
+ * child 0: 256 blocks
+ * child 1: 128 blocks
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, false);
+ ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;
+
+ rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, metadata, 0x1000, 384, NULL, NULL,
+ SPDK_NVME_IO_FLAGS_PRACT, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+ child0 = TAILQ_FIRST(&g_request->children);
+
+ SPDK_CU_ASSERT_FATAL(child0 != NULL);
+ CU_ASSERT(child0->payload_offset == 0);
+ CU_ASSERT(child0->payload_size == 256 * 512);
+ CU_ASSERT(child0->md_offset == 0);
+ child1 = TAILQ_NEXT(child0, child_tailq);
+
+ SPDK_CU_ASSERT_FATAL(child1 != NULL);
+ CU_ASSERT(child1->payload_offset == 256 * 512);
+ CU_ASSERT(child1->payload_size == 128 * 512);
+ CU_ASSERT(child1->md_offset == 256 * 8); /* metadata offset advances by md_size per block */
+
+ nvme_request_free_children(g_request);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ free(buffer);
+ free(metadata);
+}
+
+/*
+ * Verify the simple read-with-metadata path: a separate metadata buffer and
+ * a transfer exactly at the 128 KB MDTS limit produce a single, unsplit
+ * request whose payload carries the metadata pointer.
+ */
+static void
+test_nvme_ns_cmd_read_with_md(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ int rc = 0;
+ char *buffer = NULL;
+ char *metadata = NULL;
+ uint32_t block_size, md_size;
+
+ block_size = 512;
+ md_size = 128;
+
+ buffer = malloc(block_size * 256);
+ SPDK_CU_ASSERT_FATAL(buffer != NULL);
+ metadata = malloc(md_size * 256);
+ SPDK_CU_ASSERT_FATAL(metadata != NULL);
+
+ /*
+ * 512 byte data + 128 byte metadata
+ * Separate metadata buffer
+ * Max data transfer size 128 KB
+ * No stripe size
+ *
+ * 256 blocks * 512 bytes per block = single 128 KB I/O (no splitting required)
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, false);
+
+ rc = spdk_nvme_ns_cmd_read_with_md(&ns, &qpair, buffer, metadata, 0x1000, 256, NULL, NULL, 0, 0,
+ 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->payload.md == metadata);
+ CU_ASSERT(g_request->payload_size == 256 * 512);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+ free(buffer);
+ free(metadata);
+}
+
+/*
+ * Exercise spdk_nvme_ns_cmd_compare_with_md() over the same matrix of
+ * namespace formats as the write-with-md test: separate metadata buffer
+ * vs. extended LBA, with and without protection information + PRACT.
+ * Each case checks request splitting and child payload/metadata layout.
+ */
+static void
+test_nvme_ns_cmd_compare_with_md(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ int rc = 0;
+ char *buffer = NULL;
+ char *metadata = NULL;
+ uint32_t block_size, md_size;
+ struct nvme_request *child0, *child1;
+
+ block_size = 512;
+ md_size = 128;
+
+ /* Buffers sized for the largest case below (384 blocks) */
+ buffer = malloc((block_size + md_size) * 384);
+ SPDK_CU_ASSERT_FATAL(buffer != NULL);
+ metadata = malloc(md_size * 384);
+ SPDK_CU_ASSERT_FATAL(metadata != NULL);
+
+ /*
+ * 512 byte data + 128 byte metadata
+ * Separate metadata buffer
+ * Max data transfer size 128 KB
+ * No stripe size
+ *
+ * 256 blocks * 512 bytes per block = single 128 KB I/O (no splitting required)
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, false);
+
+ rc = spdk_nvme_ns_cmd_compare_with_md(&ns, &qpair, buffer, metadata, 0x1000, 256,
+ NULL, NULL, 0, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->payload.md == metadata);
+ CU_ASSERT(g_request->payload_size == 256 * 512);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 128 byte metadata
+ * Extended LBA
+ * Max data transfer size 128 KB
+ * No stripe size
+ *
+ * 256 blocks * (512 + 128) bytes per block = two I/Os:
+ * child 0: 204 blocks - 204 * (512 + 128) = 127.5 KB
+ * child 1: 52 blocks
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, true);
+
+ rc = spdk_nvme_ns_cmd_compare_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256,
+ NULL, NULL, 0, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+ child0 = TAILQ_FIRST(&g_request->children);
+
+ SPDK_CU_ASSERT_FATAL(child0 != NULL);
+ CU_ASSERT(child0->payload.md == NULL);
+ CU_ASSERT(child0->payload_offset == 0);
+ CU_ASSERT(child0->payload_size == 204 * (512 + 128));
+ child1 = TAILQ_NEXT(child0, child_tailq);
+
+ SPDK_CU_ASSERT_FATAL(child1 != NULL);
+ CU_ASSERT(child1->payload.md == NULL);
+ CU_ASSERT(child1->payload_offset == 204 * (512 + 128));
+ CU_ASSERT(child1->payload_size == 52 * (512 + 128));
+
+ nvme_request_free_children(g_request);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 8 byte metadata
+ * Extended LBA
+ * Max data transfer size 128 KB
+ * No stripe size
+ * No protection information
+ *
+ * 256 blocks * (512 + 8) bytes per block = two I/Os:
+ * child 0: 252 blocks - 252 * (512 + 8) = 127.96875 KB
+ * child 1: 4 blocks
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, true);
+
+ rc = spdk_nvme_ns_cmd_compare_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256,
+ NULL, NULL, 0, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+ child0 = TAILQ_FIRST(&g_request->children);
+
+ SPDK_CU_ASSERT_FATAL(child0 != NULL);
+ CU_ASSERT(child0->payload.md == NULL);
+ CU_ASSERT(child0->payload_offset == 0);
+ CU_ASSERT(child0->payload_size == 252 * (512 + 8));
+ child1 = TAILQ_NEXT(child0, child_tailq);
+
+ SPDK_CU_ASSERT_FATAL(child1 != NULL);
+ CU_ASSERT(child1->payload.md == NULL);
+ CU_ASSERT(child1->payload_offset == 252 * (512 + 8));
+ CU_ASSERT(child1->payload_size == 4 * (512 + 8));
+
+ nvme_request_free_children(g_request);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 8 byte metadata
+ * Extended LBA
+ * Max data transfer size 128 KB
+ * No stripe size
+ * Protection information enabled + PRACT
+ *
+ * Special case for 8-byte metadata + PI + PRACT: no metadata transferred
+ * In theory, 256 blocks * 512 bytes per block = one I/O (128 KB)
+ * However, the splitting code does not account for PRACT when calculating
+ * max sectors per transfer, so we actually get two I/Os:
+ * child 0: 252 blocks
+ * child 1: 4 blocks
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, true);
+ ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;
+
+ rc = spdk_nvme_ns_cmd_compare_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256,
+ NULL, NULL, SPDK_NVME_IO_FLAGS_PRACT, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+ child0 = TAILQ_FIRST(&g_request->children);
+
+ SPDK_CU_ASSERT_FATAL(child0 != NULL);
+ CU_ASSERT(child0->payload_offset == 0);
+ CU_ASSERT(child0->payload_size == 252 * 512); /* NOTE: does not include metadata! */
+ child1 = TAILQ_NEXT(child0, child_tailq);
+
+ SPDK_CU_ASSERT_FATAL(child1 != NULL);
+ CU_ASSERT(child1->payload.md == NULL);
+ CU_ASSERT(child1->payload_offset == 252 * 512);
+ CU_ASSERT(child1->payload_size == 4 * 512);
+
+ nvme_request_free_children(g_request);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 8 byte metadata
+ * Separate metadata buffer
+ * Max data transfer size 128 KB
+ * No stripe size
+ * Protection information enabled + PRACT
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, false);
+ ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;
+
+ rc = spdk_nvme_ns_cmd_compare_with_md(&ns, &qpair, buffer, metadata, 0x1000, 256,
+ NULL, NULL, SPDK_NVME_IO_FLAGS_PRACT, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->payload.md == metadata);
+ CU_ASSERT(g_request->payload_size == 256 * 512);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 8 byte metadata
+ * Separate metadata buffer
+ * Max data transfer size 128 KB
+ * No stripe size
+ * Protection information enabled + PRACT
+ *
+ * 384 blocks * 512 bytes = two I/Os:
+ * child 0: 256 blocks
+ * child 1: 128 blocks
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, false);
+ ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;
+
+ rc = spdk_nvme_ns_cmd_compare_with_md(&ns, &qpair, buffer, metadata, 0x1000, 384,
+ NULL, NULL, SPDK_NVME_IO_FLAGS_PRACT, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+ child0 = TAILQ_FIRST(&g_request->children);
+
+ SPDK_CU_ASSERT_FATAL(child0 != NULL);
+ CU_ASSERT(child0->payload_offset == 0);
+ CU_ASSERT(child0->payload_size == 256 * 512);
+ CU_ASSERT(child0->md_offset == 0);
+ child1 = TAILQ_NEXT(child0, child_tailq);
+
+ SPDK_CU_ASSERT_FATAL(child1 != NULL);
+ CU_ASSERT(child1->payload_offset == 256 * 512);
+ CU_ASSERT(child1->payload_size == 128 * 512);
+ CU_ASSERT(child1->md_offset == 256 * 8); /* metadata offset advances by md_size per block */
+
+ nvme_request_free_children(g_request);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ free(buffer);
+ free(metadata);
+}
+
+/*
+ * CUnit harness entry point: register every test in the "nvme_ns_cmd"
+ * suite, run them in verbose mode, and return the failure count so a
+ * non-zero exit status signals test failure to the build system.
+ */
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("nvme_ns_cmd", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "split_test", split_test) == NULL
+ || CU_add_test(suite, "split_test2", split_test2) == NULL
+ || CU_add_test(suite, "split_test3", split_test3) == NULL
+ || CU_add_test(suite, "split_test4", split_test4) == NULL
+ || CU_add_test(suite, "nvme_ns_cmd_flush", test_nvme_ns_cmd_flush) == NULL
+ || CU_add_test(suite, "nvme_ns_cmd_dataset_management",
+ test_nvme_ns_cmd_dataset_management) == NULL
+ || CU_add_test(suite, "io_flags", test_io_flags) == NULL
+ || CU_add_test(suite, "nvme_ns_cmd_write_zeroes", test_nvme_ns_cmd_write_zeroes) == NULL
+ || CU_add_test(suite, "nvme_ns_cmd_reservation_register",
+ test_nvme_ns_cmd_reservation_register) == NULL
+ || CU_add_test(suite, "nvme_ns_cmd_reservation_release",
+ test_nvme_ns_cmd_reservation_release) == NULL
+ || CU_add_test(suite, "nvme_ns_cmd_reservation_acquire",
+ test_nvme_ns_cmd_reservation_acquire) == NULL
+ || CU_add_test(suite, "nvme_ns_cmd_reservation_report", test_nvme_ns_cmd_reservation_report) == NULL
+ || CU_add_test(suite, "test_cmd_child_request", test_cmd_child_request) == NULL
+ || CU_add_test(suite, "nvme_ns_cmd_readv", test_nvme_ns_cmd_readv) == NULL
+ || CU_add_test(suite, "nvme_ns_cmd_read_with_md", test_nvme_ns_cmd_read_with_md) == NULL
+ || CU_add_test(suite, "nvme_ns_cmd_writev", test_nvme_ns_cmd_writev) == NULL
+ || CU_add_test(suite, "nvme_ns_cmd_write_with_md", test_nvme_ns_cmd_write_with_md) == NULL
+ || CU_add_test(suite, "nvme_ns_cmd_comparev", test_nvme_ns_cmd_comparev) == NULL
+ || CU_add_test(suite, "nvme_ns_cmd_compare_with_md", test_nvme_ns_cmd_compare_with_md) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ g_spdk_nvme_driver = &_g_nvme_driver; /* point the library's driver global at the local stub */
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/.gitignore
new file mode 100644
index 00000000..8f4f47a1
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/.gitignore
@@ -0,0 +1 @@
+nvme_ns_ocssd_cmd_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/Makefile
new file mode 100644
index 00000000..35fdb83a
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_ns_ocssd_cmd_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/nvme_ns_ocssd_cmd_ut.c b/src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/nvme_ns_ocssd_cmd_ut.c
new file mode 100644
index 00000000..2d13b7a6
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/nvme_ns_ocssd_cmd_ut.c
@@ -0,0 +1,677 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "nvme/nvme_ns_ocssd_cmd.c"
+#include "nvme/nvme_ns_cmd.c"
+#include "nvme/nvme.c"
+
+#include "common/lib/test_env.c"
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
+/* Completion processing is never exercised by these tests; stub it out. */
+DEFINE_STUB(spdk_nvme_qpair_process_completions, int32_t,
+	    (struct spdk_nvme_qpair *qpair,
+	     uint32_t max_completions), 0);
+
+/* Minimal driver instance so globals pulled in from nvme.c resolve. */
+static struct nvme_driver _g_nvme_driver = {
+	.lock = PTHREAD_MUTEX_INITIALIZER,
+};
+
+/* Captures the last request handed to nvme_qpair_submit_request(). */
+static struct nvme_request *g_request = NULL;
+
+/*
+ * Test override: instead of queueing to hardware, stash the request in
+ * g_request so each test can inspect the command that was built.
+ */
+int
+nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
+{
+	g_request = req;
+
+	return 0;
+}
+
+/*
+ * Link-time stubs: these tests only exercise command building, so the
+ * controller-lifecycle and transport entry points required by nvme.c
+ * are no-ops that return benign values.
+ */
+void
+nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
+{
+}
+
+void
+nvme_ctrlr_proc_get_ref(struct spdk_nvme_ctrlr *ctrlr)
+{
+	return;
+}
+
+
+int
+nvme_ctrlr_process_init(struct spdk_nvme_ctrlr *ctrlr)
+{
+	return 0;
+}
+
+void
+nvme_ctrlr_proc_put_ref(struct spdk_nvme_ctrlr *ctrlr)
+{
+	return;
+}
+
+void
+spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
+{
+	memset(opts, 0, sizeof(*opts));
+}
+
+bool
+spdk_nvme_transport_available(enum spdk_nvme_transport_type trtype)
+{
+	return true;
+}
+
+struct spdk_nvme_ctrlr *nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
+		const struct spdk_nvme_ctrlr_opts *opts,
+		void *devhandle)
+{
+	return NULL;
+}
+
+int
+nvme_ctrlr_get_ref_count(struct spdk_nvme_ctrlr *ctrlr)
+{
+	return 0;
+}
+
+int
+nvme_transport_ctrlr_scan(const struct spdk_nvme_transport_id *trid,
+			  void *cb_ctx,
+			  spdk_nvme_probe_cb probe_cb,
+			  spdk_nvme_remove_cb remove_cb,
+			  bool direct_connect)
+{
+	return 0;
+}
+
+/* The max transfer size comes straight from the stubbed controller field. */
+uint32_t
+spdk_nvme_ns_get_max_io_xfer_size(struct spdk_nvme_ns *ns)
+{
+	return ns->ctrlr->max_xfer_size;
+}
+
+/*
+ * Build a self-contained ns/ctrlr/qpair trio for one test case.
+ *
+ * sector_size/md_size/extended_lba shape the namespace format,
+ * max_xfer_size and stripe_size derive sectors_per_max_io and
+ * sectors_per_stripe, and the qpair gets a private pool of 32
+ * pre-allocated requests (released by cleanup_after_test()).
+ */
+static void
+prepare_for_test(struct spdk_nvme_ns *ns, struct spdk_nvme_ctrlr *ctrlr,
+		 struct spdk_nvme_qpair *qpair,
+		 uint32_t sector_size, uint32_t md_size, uint32_t max_xfer_size,
+		 uint32_t stripe_size, bool extended_lba)
+{
+	uint32_t num_requests = 32;
+	uint32_t i;
+
+	ctrlr->max_xfer_size = max_xfer_size;
+	/*
+	 * Clear the flags field - we especially want to make sure the SGL_SUPPORTED flag is not set
+	 * so that we test the SGL splitting path.
+	 */
+	ctrlr->flags = 0;
+	ctrlr->min_page_size = PAGE_SIZE;
+	ctrlr->page_size = PAGE_SIZE;
+	memset(&ctrlr->opts, 0, sizeof(ctrlr->opts));
+	memset(ns, 0, sizeof(*ns));
+	ns->ctrlr = ctrlr;
+	ns->sector_size = sector_size;
+	ns->extended_lba_size = sector_size;
+	if (extended_lba) {
+		ns->flags |= SPDK_NVME_NS_EXTENDED_LBA_SUPPORTED;
+		ns->extended_lba_size += md_size;
+	}
+	ns->md_size = md_size;
+	ns->sectors_per_max_io = spdk_nvme_ns_get_max_io_xfer_size(ns) / ns->extended_lba_size;
+	ns->sectors_per_stripe = stripe_size / ns->extended_lba_size;
+
+	memset(qpair, 0, sizeof(*qpair));
+	qpair->ctrlr = ctrlr;
+	qpair->req_buf = calloc(num_requests, sizeof(struct nvme_request));
+	SPDK_CU_ASSERT_FATAL(qpair->req_buf != NULL);
+
+	for (i = 0; i < num_requests; i++) {
+		/* NOTE(review): req_buf is presumably void *, so the index is
+		 * scaled manually in bytes (relies on GCC void * arithmetic). */
+		struct nvme_request *req = qpair->req_buf + i * sizeof(struct nvme_request);
+
+		STAILQ_INSERT_HEAD(&qpair->free_req, req, stailq);
+	}
+
+	g_request = NULL;
+}
+
+/* Release the request pool allocated by prepare_for_test(). */
+static void
+cleanup_after_test(struct spdk_nvme_qpair *qpair)
+{
+	free(qpair->req_buf);
+}
+
+/*
+ * A single-entry vector reset must carry the LBA directly in cdw10 and a
+ * zero-based vector size of 0 in cdw12.
+ *
+ * Fix: the return value of spdk_nvme_ocssd_ns_cmd_vector_reset() was
+ * discarded, so the rc == 0 assertion only ever checked the initializer.
+ */
+static void
+test_nvme_ocssd_ns_cmd_vector_reset_single_entry(void)
+{
+	const uint32_t max_xfer_size = 0x10000;
+	const uint32_t sector_size = 0x1000;
+
+	struct spdk_nvme_ns ns;
+	struct spdk_nvme_ctrlr ctrlr;
+	struct spdk_nvme_qpair qpair;
+
+	int rc = 0;
+
+	prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_xfer_size, 0, false);
+	uint64_t lba_list = 0x12345678;
+	rc = spdk_nvme_ocssd_ns_cmd_vector_reset(&ns, &qpair, &lba_list, 1,
+			NULL, NULL, NULL);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+	CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_RESET);
+	CU_ASSERT(g_request->cmd.nsid == ns.id);
+	CU_ASSERT(g_request->cmd.cdw10 == lba_list);
+	CU_ASSERT(g_request->cmd.cdw12 == 0);
+
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+}
+
+/*
+ * Multi-entry vector reset: cdw12 must hold the zero-based entry count.
+ *
+ * Fix: the return value of spdk_nvme_ocssd_ns_cmd_vector_reset() was
+ * discarded, so the rc == 0 assertion only ever checked the initializer.
+ */
+static void
+test_nvme_ocssd_ns_cmd_vector_reset(void)
+{
+	const uint32_t max_xfer_size = 0x10000;
+	const uint32_t sector_size = 0x1000;
+	const uint32_t vector_size = 0x10;
+
+	struct spdk_nvme_ns ns;
+	struct spdk_nvme_ctrlr ctrlr;
+	struct spdk_nvme_qpair qpair;
+
+	int rc = 0;
+
+	prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_xfer_size, 0, false);
+	uint64_t lba_list[vector_size];
+	rc = spdk_nvme_ocssd_ns_cmd_vector_reset(&ns, &qpair, lba_list, vector_size,
+			NULL, NULL, NULL);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+	CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_RESET);
+	CU_ASSERT(g_request->cmd.nsid == ns.id);
+	CU_ASSERT(g_request->cmd.cdw12 == vector_size - 1);
+
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+}
+
+/*
+ * Single-entry vector read with metadata: the command carries the raw LBA
+ * in cdw10, a zero-based vector size of 0 in cdw12, and points the payload
+ * and metadata at the caller's buffers.
+ */
+static void
+test_nvme_ocssd_ns_cmd_vector_read_with_md_single_entry(void)
+{
+	const uint32_t max_xfer_size = 0x10000;
+	const uint32_t sector_size = 0x1000;
+	const uint32_t md_size = 0x80;
+	uint64_t lba = 0x12345678;
+	struct spdk_nvme_ns ns;
+	struct spdk_nvme_ctrlr ctrlr;
+	struct spdk_nvme_qpair qpair;
+	char *data, *md;
+	int rc;
+
+	data = malloc(sector_size);
+	SPDK_CU_ASSERT_FATAL(data != NULL);
+	md = malloc(md_size);
+	SPDK_CU_ASSERT_FATAL(md != NULL);
+
+	prepare_for_test(&ns, &ctrlr, &qpair, sector_size, md_size, max_xfer_size, 0, false);
+	rc = spdk_nvme_ocssd_ns_cmd_vector_read_with_md(&ns, &qpair, data, md,
+			&lba, 1, NULL, NULL, 0);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+	CU_ASSERT(g_request->payload.md == md);
+	CU_ASSERT(g_request->payload_size == PAGE_SIZE);
+	CU_ASSERT(g_request->payload.contig_or_cb_arg == data);
+	CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_READ);
+	CU_ASSERT(g_request->cmd.nsid == ns.id);
+	CU_ASSERT(g_request->cmd.cdw10 == lba);
+	CU_ASSERT(g_request->cmd.cdw12 == 0);
+
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+
+	free(data);
+	free(md);
+}
+
+/*
+ * 16-entry vector read with metadata: cdw12 holds the zero-based entry
+ * count and the payload spans the full transfer size.
+ */
+static void
+test_nvme_ocssd_ns_cmd_vector_read_with_md(void)
+{
+	const uint32_t max_xfer_size = 0x10000;
+	const uint32_t sector_size = 0x1000;
+	const uint32_t md_size = 0x80;
+	const uint32_t vector_size = 0x10;
+	uint64_t lbas[vector_size];
+	struct spdk_nvme_ns ns;
+	struct spdk_nvme_ctrlr ctrlr;
+	struct spdk_nvme_qpair qpair;
+	char *data, *md;
+	int rc;
+
+	data = malloc(sector_size * vector_size);
+	SPDK_CU_ASSERT_FATAL(data != NULL);
+	md = malloc(md_size * vector_size);
+	SPDK_CU_ASSERT_FATAL(md != NULL);
+
+	prepare_for_test(&ns, &ctrlr, &qpair, sector_size, md_size, max_xfer_size, 0, false);
+	rc = spdk_nvme_ocssd_ns_cmd_vector_read_with_md(&ns, &qpair, data, md,
+			lbas, vector_size, NULL, NULL, 0);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+	CU_ASSERT(g_request->payload.md == md);
+	CU_ASSERT(g_request->payload_size == max_xfer_size);
+	CU_ASSERT(g_request->payload.contig_or_cb_arg == data);
+	CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_READ);
+	CU_ASSERT(g_request->cmd.nsid == ns.id);
+	CU_ASSERT(g_request->cmd.cdw12 == vector_size - 1);
+
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+
+	free(data);
+	free(md);
+}
+
+/*
+ * Single-entry vector read without metadata: the raw LBA lands in cdw10
+ * and the zero-based vector size in cdw12 is 0.
+ */
+static void
+test_nvme_ocssd_ns_cmd_vector_read_single_entry(void)
+{
+	const uint32_t max_xfer_size = 0x10000;
+	const uint32_t sector_size = 0x1000;
+	uint64_t lba = 0x12345678;
+	struct spdk_nvme_ns ns;
+	struct spdk_nvme_ctrlr ctrlr;
+	struct spdk_nvme_qpair qpair;
+	char *data;
+	int rc;
+
+	data = malloc(sector_size);
+	SPDK_CU_ASSERT_FATAL(data != NULL);
+
+	prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_xfer_size, 0, false);
+	rc = spdk_nvme_ocssd_ns_cmd_vector_read(&ns, &qpair, data, &lba, 1,
+			NULL, NULL, 0);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+	CU_ASSERT(g_request->payload_size == PAGE_SIZE);
+	CU_ASSERT(g_request->payload.contig_or_cb_arg == data);
+	CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_READ);
+	CU_ASSERT(g_request->cmd.nsid == ns.id);
+	CU_ASSERT(g_request->cmd.cdw10 == lba);
+	CU_ASSERT(g_request->cmd.cdw12 == 0);
+
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+	free(data);
+}
+
+/*
+ * 16-entry vector read without metadata: cdw12 holds the zero-based entry
+ * count and the payload spans the full transfer size.
+ */
+static void
+test_nvme_ocssd_ns_cmd_vector_read(void)
+{
+	const uint32_t max_xfer_size = 0x10000;
+	const uint32_t sector_size = 0x1000;
+	const uint32_t vector_size = 0x10;
+	uint64_t lbas[vector_size];
+	struct spdk_nvme_ns ns;
+	struct spdk_nvme_ctrlr ctrlr;
+	struct spdk_nvme_qpair qpair;
+	char *data;
+	int rc;
+
+	data = malloc(sector_size * vector_size);
+	SPDK_CU_ASSERT_FATAL(data != NULL);
+
+	prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_xfer_size, 0, false);
+	rc = spdk_nvme_ocssd_ns_cmd_vector_read(&ns, &qpair, data, lbas, vector_size,
+			NULL, NULL, 0);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+	CU_ASSERT(g_request->payload_size == max_xfer_size);
+	CU_ASSERT(g_request->payload.contig_or_cb_arg == data);
+	CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_READ);
+	CU_ASSERT(g_request->cmd.nsid == ns.id);
+	CU_ASSERT(g_request->cmd.cdw12 == vector_size - 1);
+
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+	free(data);
+}
+
+/*
+ * Single-entry vector write with metadata.
+ *
+ * Fix: the return value of spdk_nvme_ocssd_ns_cmd_vector_write_with_md()
+ * was discarded, so the rc == 0 assertion only checked the initializer.
+ */
+static void
+test_nvme_ocssd_ns_cmd_vector_write_with_md_single_entry(void)
+{
+	const uint32_t max_xfer_size = 0x10000;
+	const uint32_t sector_size = 0x1000;
+	const uint32_t md_size = 0x80;
+
+	struct spdk_nvme_ns ns;
+	struct spdk_nvme_ctrlr ctrlr;
+	struct spdk_nvme_qpair qpair;
+
+	int rc = 0;
+
+	char *buffer = malloc(sector_size);
+	char *metadata = malloc(md_size);
+	uint64_t lba_list = 0x12345678;
+
+	SPDK_CU_ASSERT_FATAL(buffer != NULL);
+	SPDK_CU_ASSERT_FATAL(metadata != NULL);
+
+	prepare_for_test(&ns, &ctrlr, &qpair, sector_size, md_size, max_xfer_size, 0, false);
+	rc = spdk_nvme_ocssd_ns_cmd_vector_write_with_md(&ns, &qpair, buffer, metadata,
+			&lba_list, 1, NULL, NULL, 0);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+	CU_ASSERT(g_request->payload.md == metadata);
+	CU_ASSERT(g_request->payload_size == PAGE_SIZE);
+	CU_ASSERT(g_request->payload.contig_or_cb_arg == buffer);
+	CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_WRITE);
+	CU_ASSERT(g_request->cmd.nsid == ns.id);
+	CU_ASSERT(g_request->cmd.cdw10 == lba_list);
+	CU_ASSERT(g_request->cmd.cdw12 == 0);
+
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+
+	free(buffer);
+	free(metadata);
+}
+
+
+/*
+ * Multi-entry vector write with metadata: cdw12 must hold the zero-based
+ * entry count and the payload spans the full transfer size.
+ *
+ * Fix: the return value of spdk_nvme_ocssd_ns_cmd_vector_write_with_md()
+ * was discarded, so the rc == 0 assertion only checked the initializer.
+ */
+static void
+test_nvme_ocssd_ns_cmd_vector_write_with_md(void)
+{
+	const uint32_t max_xfer_size = 0x10000;
+	const uint32_t sector_size = 0x1000;
+	const uint32_t md_size = 0x80;
+	const uint32_t vector_size = 0x10;
+
+	struct spdk_nvme_ns ns;
+	struct spdk_nvme_ctrlr ctrlr;
+	struct spdk_nvme_qpair qpair;
+
+	int rc = 0;
+
+	char *buffer = malloc(sector_size * vector_size);
+	char *metadata = malloc(md_size * vector_size);
+	uint64_t lba_list[vector_size];
+
+	SPDK_CU_ASSERT_FATAL(buffer != NULL);
+	SPDK_CU_ASSERT_FATAL(metadata != NULL);
+
+	prepare_for_test(&ns, &ctrlr, &qpair, sector_size, md_size, max_xfer_size, 0, false);
+	rc = spdk_nvme_ocssd_ns_cmd_vector_write_with_md(&ns, &qpair, buffer, metadata,
+			lba_list, vector_size,
+			NULL, NULL, 0);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+	CU_ASSERT(g_request->payload.md == metadata);
+	CU_ASSERT(g_request->payload_size == max_xfer_size);
+	CU_ASSERT(g_request->payload.contig_or_cb_arg == buffer);
+	CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_WRITE);
+	CU_ASSERT(g_request->cmd.nsid == ns.id);
+	CU_ASSERT(g_request->cmd.cdw12 == vector_size - 1);
+
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+
+	free(buffer);
+	free(metadata);
+}
+
+/*
+ * Single-entry vector write without metadata.
+ *
+ * Fix: the return value of spdk_nvme_ocssd_ns_cmd_vector_write() was
+ * discarded, so the rc == 0 assertion only checked the initializer.
+ */
+static void
+test_nvme_ocssd_ns_cmd_vector_write_single_entry(void)
+{
+	const uint32_t max_xfer_size = 0x10000;
+	const uint32_t sector_size = 0x1000;
+
+	struct spdk_nvme_ns ns;
+	struct spdk_nvme_ctrlr ctrlr;
+	struct spdk_nvme_qpair qpair;
+
+	int rc = 0;
+
+	char *buffer = malloc(sector_size);
+	uint64_t lba_list = 0x12345678;
+
+	SPDK_CU_ASSERT_FATAL(buffer != NULL);
+
+	prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_xfer_size, 0, false);
+	rc = spdk_nvme_ocssd_ns_cmd_vector_write(&ns, &qpair, buffer,
+			&lba_list, 1, NULL, NULL, 0);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+	CU_ASSERT(g_request->payload_size == PAGE_SIZE);
+	CU_ASSERT(g_request->payload.contig_or_cb_arg == buffer);
+	CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_WRITE);
+	CU_ASSERT(g_request->cmd.nsid == ns.id);
+	CU_ASSERT(g_request->cmd.cdw10 == lba_list);
+	CU_ASSERT(g_request->cmd.cdw12 == 0);
+
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+
+	free(buffer);
+}
+
+/*
+ * Multi-entry vector write without metadata: cdw12 must hold the
+ * zero-based entry count.
+ *
+ * Fix: the return value of spdk_nvme_ocssd_ns_cmd_vector_write() was
+ * discarded, so the rc == 0 assertion only checked the initializer.
+ */
+static void
+test_nvme_ocssd_ns_cmd_vector_write(void)
+{
+	const uint32_t max_xfer_size = 0x10000;
+	const uint32_t sector_size = 0x1000;
+	const uint32_t vector_size = 0x10;
+
+	struct spdk_nvme_ns ns;
+	struct spdk_nvme_ctrlr ctrlr;
+	struct spdk_nvme_qpair qpair;
+
+	int rc = 0;
+
+	char *buffer = malloc(sector_size * vector_size);
+	uint64_t lba_list[vector_size];
+
+	SPDK_CU_ASSERT_FATAL(buffer != NULL);
+
+	prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_xfer_size, 0, false);
+	rc = spdk_nvme_ocssd_ns_cmd_vector_write(&ns, &qpair, buffer,
+			lba_list, vector_size,
+			NULL, NULL, 0);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+	CU_ASSERT(g_request->payload_size == max_xfer_size);
+	CU_ASSERT(g_request->payload.contig_or_cb_arg == buffer);
+	CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_WRITE);
+	CU_ASSERT(g_request->cmd.nsid == ns.id);
+	CU_ASSERT(g_request->cmd.cdw12 == vector_size - 1);
+
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+
+	free(buffer);
+}
+
+/*
+ * Single-entry vector copy: source LBA in cdw10, destination LBA in cdw14,
+ * zero-based vector size of 0 in cdw12.
+ *
+ * Fix: the return value of spdk_nvme_ocssd_ns_cmd_vector_copy() was
+ * discarded, so the rc == 0 assertion only checked the initializer.
+ */
+static void
+test_nvme_ocssd_ns_cmd_vector_copy_single_entry(void)
+{
+	const uint32_t max_xfer_size = 0x10000;
+	const uint32_t sector_size = 0x1000;
+
+	struct spdk_nvme_ns ns;
+	struct spdk_nvme_ctrlr ctrlr;
+	struct spdk_nvme_qpair qpair;
+
+	int rc = 0;
+
+	uint64_t src_lba_list = 0x12345678;
+	uint64_t dst_lba_list = 0x87654321;
+
+	prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_xfer_size, 0, false);
+	rc = spdk_nvme_ocssd_ns_cmd_vector_copy(&ns, &qpair, &dst_lba_list, &src_lba_list, 1,
+			NULL, NULL, 0);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+	CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_COPY);
+	CU_ASSERT(g_request->cmd.nsid == ns.id);
+	CU_ASSERT(g_request->cmd.cdw10 == src_lba_list);
+	CU_ASSERT(g_request->cmd.cdw12 == 0);
+	CU_ASSERT(g_request->cmd.cdw14 == dst_lba_list);
+
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+}
+
+/*
+ * Multi-entry vector copy: cdw12 must hold the zero-based entry count.
+ *
+ * Fix: the return value of spdk_nvme_ocssd_ns_cmd_vector_copy() was
+ * discarded, so the rc == 0 assertion only checked the initializer.
+ */
+static void
+test_nvme_ocssd_ns_cmd_vector_copy(void)
+{
+	const uint32_t max_xfer_size = 0x10000;
+	const uint32_t sector_size = 0x1000;
+	const uint32_t vector_size = 0x10;
+
+	struct spdk_nvme_ns ns;
+	struct spdk_nvme_ctrlr ctrlr;
+	struct spdk_nvme_qpair qpair;
+
+	int rc = 0;
+
+	uint64_t src_lba_list[vector_size];
+	uint64_t dst_lba_list[vector_size];
+
+	prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_xfer_size, 0, false);
+	rc = spdk_nvme_ocssd_ns_cmd_vector_copy(&ns, &qpair,
+			dst_lba_list, src_lba_list, vector_size,
+			NULL, NULL, 0);
+
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	SPDK_CU_ASSERT_FATAL(g_request != NULL);
+	SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+	CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_COPY);
+	CU_ASSERT(g_request->cmd.nsid == ns.id);
+	CU_ASSERT(g_request->cmd.cdw12 == vector_size - 1);
+
+	nvme_free_request(g_request);
+	cleanup_after_test(&qpair);
+}
+
+/*
+ * Register and run every OCSSD command-building test; the process exit
+ * code is the number of CUnit failures.
+ */
+int main(int argc, char **argv)
+{
+	CU_pSuite suite = NULL;
+	unsigned int num_failures;
+
+	if (CU_initialize_registry() != CUE_SUCCESS) {
+		return CU_get_error();
+	}
+
+	suite = CU_add_suite("nvme_ns_cmd", NULL, NULL);
+	if (suite == NULL) {
+		CU_cleanup_registry();
+		return CU_get_error();
+	}
+
+	/*
+	 * Fix: the first test was registered as "nvme_ns_ocssd_cmd_vector_reset",
+	 * inconsistent with the "nvme_ocssd_ns_cmd_*" naming of every other test.
+	 */
+	if (
+		CU_add_test(suite, "nvme_ocssd_ns_cmd_vector_reset", test_nvme_ocssd_ns_cmd_vector_reset) == NULL
+		|| CU_add_test(suite, "nvme_ocssd_ns_cmd_vector_reset_single_entry",
+			       test_nvme_ocssd_ns_cmd_vector_reset_single_entry) == NULL
+		|| CU_add_test(suite, "nvme_ocssd_ns_cmd_vector_read_with_md",
+			       test_nvme_ocssd_ns_cmd_vector_read_with_md) == NULL
+		|| CU_add_test(suite, "nvme_ocssd_ns_cmd_vector_read_with_md_single_entry",
+			       test_nvme_ocssd_ns_cmd_vector_read_with_md_single_entry) == NULL
+		|| CU_add_test(suite, "nvme_ocssd_ns_cmd_vector_read", test_nvme_ocssd_ns_cmd_vector_read) == NULL
+		|| CU_add_test(suite, "nvme_ocssd_ns_cmd_vector_read_single_entry",
+			       test_nvme_ocssd_ns_cmd_vector_read_single_entry) == NULL
+		|| CU_add_test(suite, "nvme_ocssd_ns_cmd_vector_write_with_md",
+			       test_nvme_ocssd_ns_cmd_vector_write_with_md) == NULL
+		|| CU_add_test(suite, "nvme_ocssd_ns_cmd_vector_write_with_md_single_entry",
+			       test_nvme_ocssd_ns_cmd_vector_write_with_md_single_entry) == NULL
+		|| CU_add_test(suite, "nvme_ocssd_ns_cmd_vector_write", test_nvme_ocssd_ns_cmd_vector_write) == NULL
+		|| CU_add_test(suite, "nvme_ocssd_ns_cmd_vector_write_single_entry",
+			       test_nvme_ocssd_ns_cmd_vector_write_single_entry) == NULL
+		|| CU_add_test(suite, "nvme_ocssd_ns_cmd_vector_copy", test_nvme_ocssd_ns_cmd_vector_copy) == NULL
+		|| CU_add_test(suite, "nvme_ocssd_ns_cmd_vector_copy_single_entry",
+			       test_nvme_ocssd_ns_cmd_vector_copy_single_entry) == NULL
+	) {
+		CU_cleanup_registry();
+		return CU_get_error();
+	}
+
+	g_spdk_nvme_driver = &_g_nvme_driver;
+
+	CU_basic_set_mode(CU_BRM_VERBOSE);
+	CU_basic_run_tests();
+	num_failures = CU_get_number_of_failures();
+	CU_cleanup_registry();
+	return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_pcie.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_pcie.c/.gitignore
new file mode 100644
index 00000000..8fc29109
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_pcie.c/.gitignore
@@ -0,0 +1 @@
+nvme_pcie_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_pcie.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_pcie.c/Makefile
new file mode 100644
index 00000000..09032a93
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_pcie.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_pcie_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_pcie.c/nvme_pcie_ut.c b/src/spdk/test/unit/lib/nvme/nvme_pcie.c/nvme_pcie_ut.c
new file mode 100644
index 00000000..2bec5865
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_pcie.c/nvme_pcie_ut.c
@@ -0,0 +1,861 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "common/lib/test_env.c"
+
+#include "nvme/nvme_pcie.c"
+
+pid_t g_spdk_nvme_pid;
+
+/* Memory registration and process lookups are irrelevant here; stub them. */
+DEFINE_STUB(spdk_mem_register, int, (void *vaddr, size_t len), 0);
+DEFINE_STUB(spdk_mem_unregister, int, (void *vaddr, size_t len), 0);
+
+DEFINE_STUB(spdk_nvme_ctrlr_get_process,
+	    struct spdk_nvme_ctrlr_process *,
+	    (struct spdk_nvme_ctrlr *ctrlr, pid_t pid),
+	    NULL);
+
+DEFINE_STUB(spdk_nvme_ctrlr_get_current_process,
+	    struct spdk_nvme_ctrlr_process *,
+	    (struct spdk_nvme_ctrlr *ctrlr),
+	    NULL);
+
+DEFINE_STUB(spdk_nvme_wait_for_completion, int,
+	    (struct spdk_nvme_qpair *qpair,
+	     struct nvme_completion_poll_status *status), 0);
+
+/* NVMe trace flag kept disabled so log macros in nvme_pcie.c stay quiet. */
+struct spdk_trace_flag SPDK_LOG_NVME = {
+	.name = "nvme",
+	.enabled = false,
+};
+
+static struct nvme_driver _g_nvme_driver = {
+	.lock = PTHREAD_MUTEX_INITIALIZER,
+};
+struct nvme_driver *g_spdk_nvme_driver = &_g_nvme_driver;
+
+int32_t spdk_nvme_retry_count = 1;
+
+struct nvme_request *g_request = NULL;
+
+/* Defined in common/lib/test_env.c; forces spdk_vtophys() failures when set. */
+extern bool ut_fail_vtophys;
+
+/* When set, nvme_request_next_sge() reports an error to its caller. */
+bool fail_next_sge = false;
+
+/* Per-I/O scratch state used by the (currently disabled) SGL callbacks. */
+struct io_request {
+	uint64_t address_offset;
+	bool invalid_addr;
+	bool invalid_second_addr;
+};
+
+/*
+ * Link-time stubs required by nvme_pcie.c.  None of these is expected to
+ * be reached by the (currently disabled) tests, so each one abort()s to
+ * fail loudly if it is ever called.
+ */
+void
+nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove)
+{
+	abort();
+}
+
+int
+spdk_uevent_connect(void)
+{
+	abort();
+}
+
+int
+spdk_get_uevent(int fd, struct spdk_uevent *uevent)
+{
+	abort();
+}
+
+struct spdk_pci_id
+spdk_pci_device_get_id(struct spdk_pci_device *dev)
+{
+	abort();
+}
+
+int
+nvme_qpair_init(struct spdk_nvme_qpair *qpair, uint16_t id,
+		struct spdk_nvme_ctrlr *ctrlr,
+		enum spdk_nvme_qprio qprio,
+		uint32_t num_requests)
+{
+	abort();
+}
+
+void
+nvme_qpair_deinit(struct spdk_nvme_qpair *qpair)
+{
+	abort();
+}
+
+int
+spdk_pci_nvme_enumerate(spdk_pci_enum_cb enum_cb, void *enum_ctx)
+{
+	abort();
+}
+
+int
+spdk_pci_nvme_device_attach(spdk_pci_enum_cb enum_cb, void *enum_ctx,
+			    struct spdk_pci_addr *pci_address)
+{
+	abort();
+}
+
+void
+spdk_pci_device_detach(struct spdk_pci_device *device)
+{
+	abort();
+}
+
+int
+spdk_pci_device_map_bar(struct spdk_pci_device *dev, uint32_t bar,
+			void **mapped_addr, uint64_t *phys_addr, uint64_t *size)
+{
+	abort();
+}
+
+int
+spdk_pci_device_unmap_bar(struct spdk_pci_device *dev, uint32_t bar, void *addr)
+{
+	abort();
+}
+
+struct spdk_pci_addr
+spdk_pci_device_get_addr(struct spdk_pci_device *dev)
+{
+	abort();
+}
+
+int
+spdk_pci_device_cfg_read32(struct spdk_pci_device *dev, uint32_t *value, uint32_t offset)
+{
+	abort();
+}
+
+int
+spdk_pci_device_cfg_write32(struct spdk_pci_device *dev, uint32_t value, uint32_t offset)
+{
+	abort();
+}
+
+int
+spdk_pci_device_claim(const struct spdk_pci_addr *pci_addr)
+{
+	abort();
+}
+
+int
+nvme_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr)
+{
+	abort();
+}
+
+void
+nvme_ctrlr_destruct_finish(struct spdk_nvme_ctrlr *ctrlr)
+{
+	abort();
+}
+
+void
+nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
+{
+	abort();
+}
+
+int
+nvme_ctrlr_add_process(struct spdk_nvme_ctrlr *ctrlr, void *devhandle)
+{
+	abort();
+}
+
+void
+nvme_ctrlr_free_processes(struct spdk_nvme_ctrlr *ctrlr)
+{
+	abort();
+}
+
+struct spdk_pci_device *
+nvme_ctrlr_proc_get_devhandle(struct spdk_nvme_ctrlr *ctrlr)
+{
+	abort();
+}
+
+int
+nvme_ctrlr_probe(const struct spdk_nvme_transport_id *trid, void *devhandle,
+		 spdk_nvme_probe_cb probe_cb, void *cb_ctx)
+{
+	abort();
+}
+
+int
+nvme_ctrlr_get_cap(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cap_register *cap)
+{
+	abort();
+}
+
+int
+nvme_ctrlr_get_vs(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_vs_register *vs)
+{
+	abort();
+}
+
+void
+nvme_ctrlr_init_cap(struct spdk_nvme_ctrlr *ctrlr, const union spdk_nvme_cap_register *cap,
+		    const union spdk_nvme_vs_register *vs)
+{
+	abort();
+}
+
+uint64_t
+nvme_get_quirks(const struct spdk_pci_id *id)
+{
+	abort();
+}
+
+bool
+nvme_completion_is_retry(const struct spdk_nvme_cpl *cpl)
+{
+	abort();
+}
+
+void
+nvme_qpair_print_command(struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd)
+{
+	abort();
+}
+
+void
+nvme_qpair_print_completion(struct spdk_nvme_qpair *qpair, struct spdk_nvme_cpl *cpl)
+{
+	abort();
+}
+
+int
+nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
+{
+	abort();
+}
+
+int
+nvme_ctrlr_submit_admin_request(struct spdk_nvme_ctrlr *ctrlr,
+				struct nvme_request *req)
+{
+	abort();
+}
+
+void
+nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl)
+{
+	abort();
+}
+
+int32_t
+spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
+{
+	abort();
+}
+
+void
+nvme_qpair_enable(struct spdk_nvme_qpair *qpair)
+{
+	abort();
+}
+
+int
+nvme_request_check_timeout(struct nvme_request *req, uint16_t cid,
+			   struct spdk_nvme_ctrlr_process *active_proc,
+			   uint64_t now_tick)
+{
+	abort();
+}
+
+/* Multi-process controller lookup: always "not found" in this harness. */
+struct spdk_nvme_ctrlr *
+spdk_nvme_get_ctrlr_by_trid_unsafe(const struct spdk_nvme_transport_id *trid)
+{
+	return NULL;
+}
+
+/* Controller status register reads back as all-zero (ready/idle). */
+union spdk_nvme_csts_register spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr)
+{
+	union spdk_nvme_csts_register csts = {};
+
+	return csts;
+}
+
+#if 0 /* TODO: update PCIe-specific unit test */
+/*
+ * SGL reset callback.  sgl_offset doubles as a test selector deciding
+ * which SGE (none, the first, or the second) will later be handed out
+ * with an invalid address by nvme_request_next_sge().
+ */
+static void
+nvme_request_reset_sgl(void *cb_arg, uint32_t sgl_offset)
+{
+	struct io_request *req = (struct io_request *)cb_arg;
+
+	req->address_offset = 0;
+	req->invalid_addr = false;
+	req->invalid_second_addr = false;
+	switch (sgl_offset) {
+	case 0:
+		req->invalid_addr = false;
+		break;
+	case 1:
+		req->invalid_addr = true;
+		break;
+	case 2:
+		req->invalid_addr = false;
+		req->invalid_second_addr = true;
+		break;
+	default:
+		break;
+	}
+	return;
+}
+
+/*
+ * SGL "next element" callback.  Hands out 4 KiB elements located at
+ * 4096 * index, substituting the deliberately misaligned address 7 for
+ * whichever element the reset callback flagged as invalid.  Returns -1
+ * when fail_next_sge is set so SGE-error paths can be exercised.
+ */
+static int
+nvme_request_next_sge(void *cb_arg, void **address, uint32_t *length)
+{
+	struct io_request *req = (struct io_request *)cb_arg;
+	bool bad_addr;
+
+	bad_addr = (req->address_offset == 0 && req->invalid_addr) ||
+		   (req->address_offset == 1 && req->invalid_second_addr);
+
+	if (bad_addr) {
+		*address = (void *)7;
+	} else {
+		*address = (void *)(4096 * req->address_offset);
+	}
+
+	req->address_offset += 1;
+	*length = 4096;
+
+	return fail_next_sge ? -1 : 0;
+}
+
+/*
+ * Zero a controller and attach a freshly initialized qpair to it.
+ *
+ * NOTE(review): nvme_qpair_init() is invoked with 4 arguments here while
+ * the stub above declares 5 (num_requests) - this block is compiled out
+ * via #if 0 and needs updating before it can be re-enabled.
+ */
+static void
+prepare_submit_request_test(struct spdk_nvme_qpair *qpair,
+			    struct spdk_nvme_ctrlr *ctrlr)
+{
+	memset(ctrlr, 0, sizeof(*ctrlr));
+	ctrlr->free_io_qids = NULL;
+	TAILQ_INIT(&ctrlr->active_io_qpairs);
+	TAILQ_INIT(&ctrlr->active_procs);
+	nvme_qpair_init(qpair, 1, ctrlr, 0);
+
+	ut_fail_vtophys = false;
+}
+
+/* Nothing to release; kept so tests pair symmetrically with prepare(). */
+static void
+cleanup_submit_request_test(struct spdk_nvme_qpair *qpair)
+{
+}
+
+/*
+ * Plant a fake completion into CQ slot 'slot': take a tracker off the
+ * free list, mark it outstanding/active, and write a completion entry
+ * whose phase bit matches the queue so it will be consumed.  The request
+ * is heap-allocated and owned by the tracker until completion.
+ */
+static void
+ut_insert_cq_entry(struct spdk_nvme_qpair *qpair, uint32_t slot)
+{
+	struct nvme_request *req;
+	struct nvme_tracker *tr;
+	struct spdk_nvme_cpl *cpl;
+
+	req = calloc(1, sizeof(*req));
+	SPDK_CU_ASSERT_FATAL(req != NULL);
+	memset(req, 0, sizeof(*req));
+
+	tr = TAILQ_FIRST(&qpair->free_tr);
+	TAILQ_REMOVE(&qpair->free_tr, tr, tq_list); /* remove tr from free_tr */
+	TAILQ_INSERT_HEAD(&qpair->outstanding_tr, tr, tq_list);
+	req->cmd.cid = tr->cid;
+	tr->req = req;
+	qpair->tr[tr->cid].active = true;
+
+	cpl = &qpair->cpl[slot];
+	cpl->status.p = qpair->phase;
+	cpl->cid = tr->cid;
+}
+
+/* Completion callback used when the test expects the command to succeed. */
+static void
+expected_success_callback(void *arg, const struct spdk_nvme_cpl *cpl)
+{
+	CU_ASSERT(!spdk_nvme_cpl_is_error(cpl));
+}
+
+/* Completion callback used when the test expects the command to fail. */
+static void
+expected_failure_callback(void *arg, const struct spdk_nvme_cpl *cpl)
+{
+	CU_ASSERT(spdk_nvme_cpl_is_error(cpl));
+}
+
+/*
+ * A request whose payload fails vtophys translation must be rejected at
+ * submit time without advancing the submission queue tail.
+ */
+static void
+test4(void)
+{
+	struct spdk_nvme_qpair qpair = {};
+	struct nvme_request *req;
+	struct spdk_nvme_ctrlr ctrlr = {};
+	char payload[4096];
+
+	prepare_submit_request_test(&qpair, &ctrlr);
+
+	req = nvme_allocate_request_contig(payload, sizeof(payload), expected_failure_callback, NULL);
+	SPDK_CU_ASSERT_FATAL(req != NULL);
+
+	/* Force vtophys to return a failure. This should
+	 * result in the nvme_qpair manually failing
+	 * the request with error status to signify
+	 * a bad payload buffer.
+	 */
+	ut_fail_vtophys = true;
+
+	CU_ASSERT(qpair.sq_tail == 0);
+
+	CU_ASSERT(nvme_qpair_submit_request(&qpair, req) != 0);
+
+	CU_ASSERT(qpair.sq_tail == 0);
+
+	cleanup_submit_request_test(&qpair);
+}
+
+static void
+test_sgl_req(void)
+{
+ struct spdk_nvme_qpair qpair = {};
+ struct nvme_request *req;
+ struct spdk_nvme_ctrlr ctrlr = {};
+ struct nvme_payload payload = {};
+ struct nvme_tracker *sgl_tr = NULL;
+ uint64_t i;
+ struct io_request io_req = {};
+
+ payload = NVME_PAYLOAD_SGL(nvme_request_reset_sgl, nvme_request_next_sge, &io_req, NULL);
+
+ prepare_submit_request_test(&qpair, &ctrlr);
+ req = nvme_allocate_request(&payload, 0x1000, NULL, &io_req);
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+ req->cmd.opc = SPDK_NVME_OPC_WRITE;
+ req->cmd.cdw10 = 10000;
+ req->cmd.cdw12 = 7 | 0;
+ req->payload_offset = 1;
+
+ CU_ASSERT(nvme_qpair_submit_request(&qpair, req) != 0);
+ CU_ASSERT(qpair.sq_tail == 0);
+ cleanup_submit_request_test(&qpair);
+
+ prepare_submit_request_test(&qpair, &ctrlr);
+ req = nvme_allocate_request(&payload, 0x1000, NULL, &io_req);
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+ req->cmd.opc = SPDK_NVME_OPC_WRITE;
+ req->cmd.cdw10 = 10000;
+ req->cmd.cdw12 = 7 | 0;
+ spdk_nvme_retry_count = 1;
+ fail_next_sge = true;
+
+ CU_ASSERT(nvme_qpair_submit_request(&qpair, req) != 0);
+ CU_ASSERT(qpair.sq_tail == 0);
+ cleanup_submit_request_test(&qpair);
+
+ fail_next_sge = false;
+
+ prepare_submit_request_test(&qpair, &ctrlr);
+ req = nvme_allocate_request(&payload, 2 * 0x1000, NULL, &io_req);
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+ req->cmd.opc = SPDK_NVME_OPC_WRITE;
+ req->cmd.cdw10 = 10000;
+ req->cmd.cdw12 = 15 | 0;
+ req->payload_offset = 2;
+
+ CU_ASSERT(nvme_qpair_submit_request(&qpair, req) != 0);
+ CU_ASSERT(qpair.sq_tail == 0);
+ cleanup_submit_request_test(&qpair);
+
+ prepare_submit_request_test(&qpair, &ctrlr);
+ req = nvme_allocate_request(&payload, (NVME_MAX_PRP_LIST_ENTRIES + 1) * 0x1000, NULL, &io_req);
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+ req->cmd.opc = SPDK_NVME_OPC_WRITE;
+ req->cmd.cdw10 = 10000;
+ req->cmd.cdw12 = 4095 | 0;
+
+ CU_ASSERT(nvme_qpair_submit_request(&qpair, req) == 0);
+
+ CU_ASSERT(req->cmd.dptr.prp.prp1 == 0);
+ CU_ASSERT(qpair.sq_tail == 1);
+ sgl_tr = TAILQ_FIRST(&qpair.outstanding_tr);
+ if (sgl_tr != NULL) {
+ for (i = 0; i < NVME_MAX_PRP_LIST_ENTRIES; i++) {
+ CU_ASSERT(sgl_tr->u.prp[i] == (0x1000 * (i + 1)));
+ }
+
+ TAILQ_REMOVE(&qpair.outstanding_tr, sgl_tr, tq_list);
+ }
+ cleanup_submit_request_test(&qpair);
+ nvme_free_request(req);
+}
+
+static void
+test_hw_sgl_req(void)
+{
+ struct spdk_nvme_qpair qpair = {};
+ struct nvme_request *req;
+ struct spdk_nvme_ctrlr ctrlr = {};
+ struct nvme_payload payload = {};
+ struct nvme_tracker *sgl_tr = NULL;
+ uint64_t i;
+ struct io_request io_req = {};
+
+ payload = NVME_PAYLOAD_SGL(nvme_request_reset_sgl, nvme_request_next_sge, &io_req, NULL);
+
+ prepare_submit_request_test(&qpair, &ctrlr);
+ req = nvme_allocate_request(&payload, 0x1000, NULL, &io_req);
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+ req->cmd.opc = SPDK_NVME_OPC_WRITE;
+ req->cmd.cdw10 = 10000;
+ req->cmd.cdw12 = 7 | 0;
+ req->payload_offset = 0;
+ ctrlr.flags |= SPDK_NVME_CTRLR_SGL_SUPPORTED;
+
+ nvme_qpair_submit_request(&qpair, req);
+
+ sgl_tr = TAILQ_FIRST(&qpair.outstanding_tr);
+ CU_ASSERT(sgl_tr != NULL);
+ CU_ASSERT(sgl_tr->u.sgl[0].generic.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
+ CU_ASSERT(sgl_tr->u.sgl[0].generic.subtype == 0);
+ CU_ASSERT(sgl_tr->u.sgl[0].unkeyed.length == 4096);
+ CU_ASSERT(sgl_tr->u.sgl[0].address == 0);
+ CU_ASSERT(req->cmd.dptr.sgl1.generic.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
+ TAILQ_REMOVE(&qpair.outstanding_tr, sgl_tr, tq_list);
+ cleanup_submit_request_test(&qpair);
+ nvme_free_request(req);
+
+ prepare_submit_request_test(&qpair, &ctrlr);
+ req = nvme_allocate_request(&payload, NVME_MAX_SGL_DESCRIPTORS * 0x1000, NULL, &io_req);
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+ req->cmd.opc = SPDK_NVME_OPC_WRITE;
+ req->cmd.cdw10 = 10000;
+ req->cmd.cdw12 = 2023 | 0;
+ req->payload_offset = 0;
+ ctrlr.flags |= SPDK_NVME_CTRLR_SGL_SUPPORTED;
+
+ nvme_qpair_submit_request(&qpair, req);
+
+ sgl_tr = TAILQ_FIRST(&qpair.outstanding_tr);
+ CU_ASSERT(sgl_tr != NULL);
+ for (i = 0; i < NVME_MAX_SGL_DESCRIPTORS; i++) {
+ CU_ASSERT(sgl_tr->u.sgl[i].generic.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
+ CU_ASSERT(sgl_tr->u.sgl[i].generic.subtype == 0);
+ CU_ASSERT(sgl_tr->u.sgl[i].unkeyed.length == 4096);
+ CU_ASSERT(sgl_tr->u.sgl[i].address == i * 4096);
+ }
+ CU_ASSERT(req->cmd.dptr.sgl1.generic.type == SPDK_NVME_SGL_TYPE_LAST_SEGMENT);
+ TAILQ_REMOVE(&qpair.outstanding_tr, sgl_tr, tq_list);
+ cleanup_submit_request_test(&qpair);
+ nvme_free_request(req);
+}
+
+static void test_nvme_qpair_fail(void)
+{
+ struct spdk_nvme_qpair qpair = {};
+ struct nvme_request *req = NULL;
+ struct spdk_nvme_ctrlr ctrlr = {};
+ struct nvme_tracker *tr_temp;
+
+ prepare_submit_request_test(&qpair, &ctrlr);
+
+ tr_temp = TAILQ_FIRST(&qpair.free_tr);
+ SPDK_CU_ASSERT_FATAL(tr_temp != NULL);
+ TAILQ_REMOVE(&qpair.free_tr, tr_temp, tq_list);
+ tr_temp->req = nvme_allocate_request_null(expected_failure_callback, NULL);
+ SPDK_CU_ASSERT_FATAL(tr_temp->req != NULL);
+ tr_temp->req->cmd.cid = tr_temp->cid;
+
+ TAILQ_INSERT_HEAD(&qpair.outstanding_tr, tr_temp, tq_list);
+ nvme_qpair_fail(&qpair);
+ CU_ASSERT_TRUE(TAILQ_EMPTY(&qpair.outstanding_tr));
+
+ req = nvme_allocate_request_null(expected_failure_callback, NULL);
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+
+ STAILQ_INSERT_HEAD(&qpair.queued_req, req, stailq);
+ nvme_qpair_fail(&qpair);
+ CU_ASSERT_TRUE(STAILQ_EMPTY(&qpair.queued_req));
+
+ cleanup_submit_request_test(&qpair);
+}
+
+static void
+test_nvme_qpair_process_completions_limit(void)
+{
+ struct spdk_nvme_qpair qpair = {};
+ struct spdk_nvme_ctrlr ctrlr = {};
+
+ prepare_submit_request_test(&qpair, &ctrlr);
+ qpair.is_enabled = true;
+
+ /* Insert 4 entries into the completion queue */
+ CU_ASSERT(qpair.cq_head == 0);
+ ut_insert_cq_entry(&qpair, 0);
+ ut_insert_cq_entry(&qpair, 1);
+ ut_insert_cq_entry(&qpair, 2);
+ ut_insert_cq_entry(&qpair, 3);
+
+ /* This should only process 2 completions, and 2 should be left in the queue */
+ spdk_nvme_qpair_process_completions(&qpair, 2);
+ CU_ASSERT(qpair.cq_head == 2);
+
+ /* This should only process 1 completion, and 1 should be left in the queue */
+ spdk_nvme_qpair_process_completions(&qpair, 1);
+ CU_ASSERT(qpair.cq_head == 3);
+
+ /* This should process the remaining completion */
+ spdk_nvme_qpair_process_completions(&qpair, 5);
+ CU_ASSERT(qpair.cq_head == 4);
+
+ cleanup_submit_request_test(&qpair);
+}
+
+static void test_nvme_qpair_destroy(void)
+{
+ struct spdk_nvme_qpair qpair = {};
+ struct spdk_nvme_ctrlr ctrlr = {};
+ struct nvme_tracker *tr_temp;
+
+ memset(&ctrlr, 0, sizeof(ctrlr));
+ TAILQ_INIT(&ctrlr.free_io_qpairs);
+ TAILQ_INIT(&ctrlr.active_io_qpairs);
+ TAILQ_INIT(&ctrlr.active_procs);
+
+ nvme_qpair_init(&qpair, 1, 128, &ctrlr);
+ nvme_qpair_destroy(&qpair);
+
+
+ nvme_qpair_init(&qpair, 0, 128, &ctrlr);
+ tr_temp = TAILQ_FIRST(&qpair.free_tr);
+ SPDK_CU_ASSERT_FATAL(tr_temp != NULL);
+ TAILQ_REMOVE(&qpair.free_tr, tr_temp, tq_list);
+ tr_temp->req = nvme_allocate_request_null(expected_failure_callback, NULL);
+ SPDK_CU_ASSERT_FATAL(tr_temp->req != NULL);
+
+ tr_temp->req->cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
+ tr_temp->req->cmd.cid = tr_temp->cid;
+ TAILQ_INSERT_HEAD(&qpair.outstanding_tr, tr_temp, tq_list);
+
+ nvme_qpair_destroy(&qpair);
+ CU_ASSERT(TAILQ_EMPTY(&qpair.outstanding_tr));
+}
+#endif
+
+static void
+prp_list_prep(struct nvme_tracker *tr, struct nvme_request *req, uint32_t *prp_index)
+{
+ memset(req, 0, sizeof(*req));
+ memset(tr, 0, sizeof(*tr));
+ tr->req = req;
+ tr->prp_sgl_bus_addr = 0xDEADBEEF;
+ *prp_index = 0;
+}
+
+static void
+test_prp_list_append(void)
+{
+ struct nvme_request req;
+ struct nvme_tracker tr;
+ uint32_t prp_index;
+
+ /* Non-DWORD-aligned buffer (invalid) */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100001, 0x1000, 0x1000) == -EINVAL);
+
+ /* 512-byte buffer, 4K aligned */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000, 0x200, 0x1000) == 0);
+ CU_ASSERT(prp_index == 1);
+ CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
+
+ /* 512-byte buffer, non-4K-aligned */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x108000, 0x200, 0x1000) == 0);
+ CU_ASSERT(prp_index == 1);
+ CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x108000);
+
+ /* 4K buffer, 4K aligned */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000, 0x1000, 0x1000) == 0);
+ CU_ASSERT(prp_index == 1);
+ CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
+
+ /* 4K buffer, non-4K aligned */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800, 0x1000, 0x1000) == 0);
+ CU_ASSERT(prp_index == 2);
+ CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
+ CU_ASSERT(req.cmd.dptr.prp.prp2 == 0x101000);
+
+ /* 8K buffer, 4K aligned */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000, 0x2000, 0x1000) == 0);
+ CU_ASSERT(prp_index == 2);
+ CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
+ CU_ASSERT(req.cmd.dptr.prp.prp2 == 0x101000);
+
+ /* 8K buffer, non-4K aligned */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800, 0x2000, 0x1000) == 0);
+ CU_ASSERT(prp_index == 3);
+ CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
+ CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
+ CU_ASSERT(tr.u.prp[0] == 0x101000);
+ CU_ASSERT(tr.u.prp[1] == 0x102000);
+
+ /* 12K buffer, 4K aligned */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000, 0x3000, 0x1000) == 0);
+ CU_ASSERT(prp_index == 3);
+ CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
+ CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
+ CU_ASSERT(tr.u.prp[0] == 0x101000);
+ CU_ASSERT(tr.u.prp[1] == 0x102000);
+
+ /* 12K buffer, non-4K aligned */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800, 0x3000, 0x1000) == 0);
+ CU_ASSERT(prp_index == 4);
+ CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
+ CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
+ CU_ASSERT(tr.u.prp[0] == 0x101000);
+ CU_ASSERT(tr.u.prp[1] == 0x102000);
+ CU_ASSERT(tr.u.prp[2] == 0x103000);
+
+ /* Two 4K buffers, both 4K aligned */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000, 0x1000, 0x1000) == 0);
+ CU_ASSERT(prp_index == 1);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x900000, 0x1000, 0x1000) == 0);
+ CU_ASSERT(prp_index == 2);
+ CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
+ CU_ASSERT(req.cmd.dptr.prp.prp2 == 0x900000);
+
+ /* Two 4K buffers, first non-4K aligned, second 4K aligned */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800, 0x1000, 0x1000) == 0);
+ CU_ASSERT(prp_index == 2);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x900000, 0x1000, 0x1000) == 0);
+ CU_ASSERT(prp_index == 3);
+ CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
+ CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
+ CU_ASSERT(tr.u.prp[0] == 0x101000);
+ CU_ASSERT(tr.u.prp[1] == 0x900000);
+
+ /* Two 4K buffers, both non-4K aligned (invalid) */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800, 0x1000, 0x1000) == 0);
+ CU_ASSERT(prp_index == 2);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x900800, 0x1000, 0x1000) == -EINVAL);
+ CU_ASSERT(prp_index == 2);
+
+ /* 4K buffer, 4K aligned, but vtophys fails */
+ MOCK_SET(spdk_vtophys, SPDK_VTOPHYS_ERROR);
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000, 0x1000, 0x1000) == -EINVAL);
+ MOCK_CLEAR(spdk_vtophys);
+
+ /* Largest aligned buffer that can be described in NVME_MAX_PRP_LIST_ENTRIES (plus PRP1) */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000,
+ (NVME_MAX_PRP_LIST_ENTRIES + 1) * 0x1000, 0x1000) == 0);
+ CU_ASSERT(prp_index == NVME_MAX_PRP_LIST_ENTRIES + 1);
+
+ /* Largest non-4K-aligned buffer that can be described in NVME_MAX_PRP_LIST_ENTRIES (plus PRP1) */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800,
+ NVME_MAX_PRP_LIST_ENTRIES * 0x1000, 0x1000) == 0);
+ CU_ASSERT(prp_index == NVME_MAX_PRP_LIST_ENTRIES + 1);
+
+ /* Buffer too large to be described in NVME_MAX_PRP_LIST_ENTRIES */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000,
+ (NVME_MAX_PRP_LIST_ENTRIES + 2) * 0x1000, 0x1000) == -EINVAL);
+
+ /* Non-4K-aligned buffer too large to be described in NVME_MAX_PRP_LIST_ENTRIES */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800,
+ (NVME_MAX_PRP_LIST_ENTRIES + 1) * 0x1000, 0x1000) == -EINVAL);
+}
+
+static void test_shadow_doorbell_update(void)
+{
+ bool ret;
+
+ /* nvme_pcie_qpair_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old) */
+ ret = nvme_pcie_qpair_need_event(10, 15, 14);
+ CU_ASSERT(ret == false);
+
+ ret = nvme_pcie_qpair_need_event(14, 15, 14);
+ CU_ASSERT(ret == true);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("nvme_pcie", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (CU_add_test(suite, "prp_list_append", test_prp_list_append) == NULL
+ || CU_add_test(suite, "shadow_doorbell_update",
+ test_shadow_doorbell_update) == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_qpair.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_qpair.c/.gitignore
new file mode 100644
index 00000000..1bb18e99
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_qpair.c/.gitignore
@@ -0,0 +1 @@
+nvme_qpair_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_qpair.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_qpair.c/Makefile
new file mode 100644
index 00000000..d7762a38
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_qpair.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_qpair_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_qpair.c/nvme_qpair_ut.c b/src/spdk/test/unit/lib/nvme/nvme_qpair.c/nvme_qpair_ut.c
new file mode 100644
index 00000000..11fea8c7
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_qpair.c/nvme_qpair_ut.c
@@ -0,0 +1,418 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "common/lib/test_env.c"
+
+pid_t g_spdk_nvme_pid;
+
+bool trace_flag = false;
+#define SPDK_LOG_NVME trace_flag
+
+#include "nvme/nvme_qpair.c"
+
+struct nvme_driver _g_nvme_driver = {
+ .lock = PTHREAD_MUTEX_INITIALIZER,
+};
+
+void
+nvme_request_remove_child(struct nvme_request *parent,
+ struct nvme_request *child)
+{
+ parent->num_children--;
+ TAILQ_REMOVE(&parent->children, child, child_tailq);
+}
+
+int
+nvme_transport_qpair_enable(struct spdk_nvme_qpair *qpair)
+{
+ return 0;
+}
+
+int
+nvme_transport_qpair_disable(struct spdk_nvme_qpair *qpair)
+{
+ return 0;
+}
+
+int
+nvme_transport_qpair_fail(struct spdk_nvme_qpair *qpair)
+{
+ return 0;
+}
+
+int
+nvme_transport_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
+{
+	// Stub for unit test: transport submission is not exercised here.
+ return 0;
+}
+
+int32_t
+nvme_transport_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
+{
+	// Stub for unit test: reports zero completions; transport path not exercised.
+ return 0;
+}
+
+int
+spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
+{
+ return 0;
+}
+
+static void
+prepare_submit_request_test(struct spdk_nvme_qpair *qpair,
+ struct spdk_nvme_ctrlr *ctrlr)
+{
+ memset(ctrlr, 0, sizeof(*ctrlr));
+ ctrlr->free_io_qids = NULL;
+ TAILQ_INIT(&ctrlr->active_io_qpairs);
+ TAILQ_INIT(&ctrlr->active_procs);
+ nvme_qpair_init(qpair, 1, ctrlr, 0, 32);
+}
+
+static void
+cleanup_submit_request_test(struct spdk_nvme_qpair *qpair)
+{
+ free(qpair->req_buf);
+}
+
+static void
+expected_success_callback(void *arg, const struct spdk_nvme_cpl *cpl)
+{
+ CU_ASSERT(!spdk_nvme_cpl_is_error(cpl));
+}
+
+static void
+expected_failure_callback(void *arg, const struct spdk_nvme_cpl *cpl)
+{
+ CU_ASSERT(spdk_nvme_cpl_is_error(cpl));
+}
+
+static void
+test3(void)
+{
+ struct spdk_nvme_qpair qpair = {};
+ struct nvme_request *req;
+ struct spdk_nvme_ctrlr ctrlr = {};
+
+ prepare_submit_request_test(&qpair, &ctrlr);
+
+ req = nvme_allocate_request_null(&qpair, expected_success_callback, NULL);
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+
+ CU_ASSERT(nvme_qpair_submit_request(&qpair, req) == 0);
+
+ nvme_free_request(req);
+
+ cleanup_submit_request_test(&qpair);
+}
+
+static void
+test_ctrlr_failed(void)
+{
+ struct spdk_nvme_qpair qpair = {};
+ struct nvme_request *req;
+ struct spdk_nvme_ctrlr ctrlr = {};
+ char payload[4096];
+
+ prepare_submit_request_test(&qpair, &ctrlr);
+
+ req = nvme_allocate_request_contig(&qpair, payload, sizeof(payload), expected_failure_callback,
+ NULL);
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+
+ /* Set the controller to failed.
+ * Set the controller to resetting so that the qpair won't get re-enabled.
+ */
+ ctrlr.is_failed = true;
+ ctrlr.is_resetting = true;
+
+ CU_ASSERT(nvme_qpair_submit_request(&qpair, req) != 0);
+
+ cleanup_submit_request_test(&qpair);
+}
+
+static void struct_packing(void)
+{
+ /* ctrlr is the first field in nvme_qpair after the fields
+ * that are used in the I/O path. Make sure the I/O path fields
+ * all fit into two cache lines.
+ */
+ CU_ASSERT(offsetof(struct spdk_nvme_qpair, ctrlr) <= 128);
+}
+
+static void test_nvme_qpair_process_completions(void)
+{
+ struct spdk_nvme_qpair qpair = {};
+ struct spdk_nvme_ctrlr ctrlr = {};
+
+ prepare_submit_request_test(&qpair, &ctrlr);
+ qpair.ctrlr->is_resetting = true;
+
+ spdk_nvme_qpair_process_completions(&qpair, 0);
+ cleanup_submit_request_test(&qpair);
+}
+
+static void test_nvme_completion_is_retry(void)
+{
+ struct spdk_nvme_cpl cpl = {};
+
+ cpl.status.sct = SPDK_NVME_SCT_GENERIC;
+ cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
+ cpl.status.dnr = 0;
+ CU_ASSERT_TRUE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_FORMAT_IN_PROGRESS;
+ cpl.status.dnr = 1;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+ cpl.status.dnr = 0;
+ CU_ASSERT_TRUE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_INVALID_FIELD;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_COMMAND_ID_CONFLICT;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_DATA_TRANSFER_ERROR;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_ABORTED_POWER_LOSS;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_ABORTED_FAILED_FUSED;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_ABORTED_MISSING_FUSED;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_INVALID_SGL_SEG_DESCRIPTOR;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_INVALID_NUM_SGL_DESCIRPTORS;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_METADATA_SGL_LENGTH_INVALID;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_SGL_DESCRIPTOR_TYPE_INVALID;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_INVALID_CONTROLLER_MEM_BUF;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_INVALID_PRP_OFFSET;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_ATOMIC_WRITE_UNIT_EXCEEDED;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_CAPACITY_EXCEEDED;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_RESERVATION_CONFLICT;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = 0x70;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sct = SPDK_NVME_SCT_MEDIA_ERROR;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sct = SPDK_NVME_SCT_PATH;
+ cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
+ cpl.status.dnr = 0;
+ CU_ASSERT_TRUE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sct = SPDK_NVME_SCT_PATH;
+ cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
+ cpl.status.dnr = 1;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sct = 0x4;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+}
+
+#ifdef DEBUG
+static void
+test_get_status_string(void)
+{
+ const char *status_string;
+
+ status_string = get_status_string(SPDK_NVME_SCT_GENERIC, SPDK_NVME_SC_SUCCESS);
+ CU_ASSERT(strcmp(status_string, "SUCCESS") == 0);
+
+ status_string = get_status_string(SPDK_NVME_SCT_COMMAND_SPECIFIC,
+ SPDK_NVME_SC_COMPLETION_QUEUE_INVALID);
+ CU_ASSERT(strcmp(status_string, "INVALID COMPLETION QUEUE") == 0);
+
+ status_string = get_status_string(SPDK_NVME_SCT_MEDIA_ERROR, SPDK_NVME_SC_UNRECOVERED_READ_ERROR);
+ CU_ASSERT(strcmp(status_string, "UNRECOVERED READ ERROR") == 0);
+
+ status_string = get_status_string(SPDK_NVME_SCT_VENDOR_SPECIFIC, 0);
+ CU_ASSERT(strcmp(status_string, "VENDOR SPECIFIC") == 0);
+
+ status_string = get_status_string(100, 0);
+ CU_ASSERT(strcmp(status_string, "RESERVED") == 0);
+}
+#endif
+
+static void
+test_nvme_qpair_add_cmd_error_injection(void)
+{
+ struct spdk_nvme_qpair qpair = {};
+ struct spdk_nvme_ctrlr ctrlr = {};
+ int rc;
+
+ prepare_submit_request_test(&qpair, &ctrlr);
+ ctrlr.adminq = &qpair;
+
+ /* Admin error injection at submission path */
+ rc = spdk_nvme_qpair_add_cmd_error_injection(&ctrlr, NULL,
+ SPDK_NVME_OPC_GET_FEATURES, true, 5000, 1,
+ SPDK_NVME_SCT_GENERIC, SPDK_NVME_SC_INVALID_FIELD);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(!TAILQ_EMPTY(&qpair.err_cmd_head));
+
+ /* Remove cmd error injection */
+ spdk_nvme_qpair_remove_cmd_error_injection(&ctrlr, NULL, SPDK_NVME_OPC_GET_FEATURES);
+
+ CU_ASSERT(TAILQ_EMPTY(&qpair.err_cmd_head));
+
+ /* IO error injection at completion path */
+ rc = spdk_nvme_qpair_add_cmd_error_injection(&ctrlr, &qpair,
+ SPDK_NVME_OPC_READ, false, 0, 1,
+ SPDK_NVME_SCT_MEDIA_ERROR, SPDK_NVME_SC_UNRECOVERED_READ_ERROR);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(!TAILQ_EMPTY(&qpair.err_cmd_head));
+
+ /* Provide the same opc, and check whether allocate a new entry */
+ rc = spdk_nvme_qpair_add_cmd_error_injection(&ctrlr, &qpair,
+ SPDK_NVME_OPC_READ, false, 0, 1,
+ SPDK_NVME_SCT_MEDIA_ERROR, SPDK_NVME_SC_UNRECOVERED_READ_ERROR);
+
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&qpair.err_cmd_head));
+ CU_ASSERT(TAILQ_NEXT(TAILQ_FIRST(&qpair.err_cmd_head), link) == NULL);
+
+ /* Remove cmd error injection */
+ spdk_nvme_qpair_remove_cmd_error_injection(&ctrlr, &qpair, SPDK_NVME_OPC_READ);
+
+ CU_ASSERT(TAILQ_EMPTY(&qpair.err_cmd_head));
+
+ rc = spdk_nvme_qpair_add_cmd_error_injection(&ctrlr, &qpair,
+ SPDK_NVME_OPC_COMPARE, true, 0, 5,
+ SPDK_NVME_SCT_GENERIC, SPDK_NVME_SC_COMPARE_FAILURE);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(!TAILQ_EMPTY(&qpair.err_cmd_head));
+
+ /* Remove cmd error injection */
+ spdk_nvme_qpair_remove_cmd_error_injection(&ctrlr, &qpair, SPDK_NVME_OPC_COMPARE);
+
+ CU_ASSERT(TAILQ_EMPTY(&qpair.err_cmd_head));
+
+ cleanup_submit_request_test(&qpair);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("nvme_qpair", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (CU_add_test(suite, "test3", test3) == NULL
+ || CU_add_test(suite, "ctrlr_failed", test_ctrlr_failed) == NULL
+ || CU_add_test(suite, "struct_packing", struct_packing) == NULL
+ || CU_add_test(suite, "spdk_nvme_qpair_process_completions",
+ test_nvme_qpair_process_completions) == NULL
+ || CU_add_test(suite, "nvme_completion_is_retry", test_nvme_completion_is_retry) == NULL
+#ifdef DEBUG
+ || CU_add_test(suite, "get_status_string", test_get_status_string) == NULL
+#endif
+ || CU_add_test(suite, "spdk_nvme_qpair_add_cmd_error_injection",
+ test_nvme_qpair_add_cmd_error_injection) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_quirks.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_quirks.c/.gitignore
new file mode 100644
index 00000000..eca86651
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_quirks.c/.gitignore
@@ -0,0 +1 @@
+nvme_quirks_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_quirks.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_quirks.c/Makefile
new file mode 100644
index 00000000..d86887f0
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_quirks.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_quirks_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_quirks.c/nvme_quirks_ut.c b/src/spdk/test/unit/lib/nvme/nvme_quirks.c/nvme_quirks_ut.c
new file mode 100644
index 00000000..95fdd143
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_quirks.c/nvme_quirks_ut.c
@@ -0,0 +1,102 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "nvme/nvme_quirks.c"
+
+SPDK_LOG_REGISTER_COMPONENT("nvme", SPDK_LOG_NVME)
+
+/*
+ * Verify the striping quirk lookup in nvme_get_quirks(): the
+ * NVME_INTEL_QUIRK_STRIPING bit must be set only for Intel devices with
+ * device ID 0x0953, and must be independent of the subsystem
+ * vendor/device IDs.
+ */
+static void
+test_nvme_quirks_striping(void)
+{
+	struct spdk_pci_id pci_id = {};
+	uint64_t quirks = 0;
+
+	/* Non-Intel device should not have striping enabled */
+	quirks = nvme_get_quirks(&pci_id);
+	CU_ASSERT((quirks & NVME_INTEL_QUIRK_STRIPING) == 0);
+
+	/* Set the vendor id to Intel, but no device id. No striping. */
+	pci_id.vendor_id = SPDK_PCI_VID_INTEL;
+	quirks = nvme_get_quirks(&pci_id);
+	CU_ASSERT((quirks & NVME_INTEL_QUIRK_STRIPING) == 0);
+
+	/* Device ID 0x0953 should have striping enabled */
+	pci_id.device_id = 0x0953;
+	quirks = nvme_get_quirks(&pci_id);
+	CU_ASSERT((quirks & NVME_INTEL_QUIRK_STRIPING) != 0);
+
+	/* Even if specific subvendor/subdevice ids are set,
+	 * striping should be enabled.
+	 */
+	pci_id.subvendor_id = SPDK_PCI_VID_INTEL;
+	pci_id.subdevice_id = 0x3704;
+	quirks = nvme_get_quirks(&pci_id);
+	CU_ASSERT((quirks & NVME_INTEL_QUIRK_STRIPING) != 0);
+
+	/* Arbitrary (non-matching) subsystem IDs must not disable the quirk. */
+	pci_id.subvendor_id = 1234;
+	pci_id.subdevice_id = 42;
+	quirks = nvme_get_quirks(&pci_id);
+	CU_ASSERT((quirks & NVME_INTEL_QUIRK_STRIPING) != 0);
+}
+
+/*
+ * Test runner: registers the single quirks test with CUnit, runs it in
+ * verbose mode, and returns the number of failed assertions as the
+ * process exit status (0 == success).
+ */
+int main(int argc, char **argv)
+{
+	unsigned int num_failures;
+	CU_pSuite suite;
+
+	if (CU_initialize_registry() != CUE_SUCCESS) {
+		return CU_get_error();
+	}
+
+	suite = CU_add_suite("nvme_quirks", NULL, NULL);
+	/* Short-circuit keeps CU_add_test from running on a NULL suite. */
+	if (suite == NULL ||
+	    CU_add_test(suite, "test nvme_quirks striping",
+			test_nvme_quirks_striping) == NULL) {
+		CU_cleanup_registry();
+		return CU_get_error();
+	}
+
+	CU_basic_set_mode(CU_BRM_VERBOSE);
+	CU_basic_run_tests();
+	num_failures = CU_get_number_of_failures();
+	CU_cleanup_registry();
+	return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_rdma.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_rdma.c/.gitignore
new file mode 100644
index 00000000..66265b95
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_rdma.c/.gitignore
@@ -0,0 +1 @@
+nvme_rdma_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_rdma.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_rdma.c/Makefile
new file mode 100644
index 00000000..7ea42632
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_rdma.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+# Root of the SPDK tree, five levels up from this unit-test directory.
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+# Source file compiled into the nvme_rdma unit-test binary.
+TEST_FILE = nvme_rdma_ut.c
+
+# Shared unit-test build rules (compile/link targets for TEST_FILE).
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_rdma.c/nvme_rdma_ut.c b/src/spdk/test/unit/lib/nvme/nvme_rdma.c/nvme_rdma_ut.c
new file mode 100644
index 00000000..87835ab6
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_rdma.c/nvme_rdma_ut.c
@@ -0,0 +1,298 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+#include "spdk_cunit.h"
+#include "common/lib/test_env.c"
+#include "nvme/nvme_rdma.c"
+
+SPDK_LOG_REGISTER_COMPONENT("nvme", SPDK_LOG_NVME)
+
+/*
+ * Stubbed-out dependencies of nvme_rdma.c so it can link in isolation.
+ * DEFINE_STUB(name, ret, args, val) generates a function returning `val`;
+ * DEFINE_STUB_V generates a void function.  The macros presumably come from
+ * common/lib/test_env.c, included above — verify if moving this file.
+ */
+DEFINE_STUB(nvme_qpair_submit_request, int, (struct spdk_nvme_qpair *qpair,
+		struct nvme_request *req), 0);
+
+DEFINE_STUB(nvme_qpair_init, int, (struct spdk_nvme_qpair *qpair, uint16_t id,
+		struct spdk_nvme_ctrlr *ctrlr, enum spdk_nvme_qprio qprio, uint32_t num_requests), 0);
+
+DEFINE_STUB_V(nvme_qpair_deinit, (struct spdk_nvme_qpair *qpair));
+
+DEFINE_STUB(nvme_ctrlr_probe, int, (const struct spdk_nvme_transport_id *trid, void *devhandle,
+		spdk_nvme_probe_cb probe_cb, void *cb_ctx), 0);
+
+DEFINE_STUB(nvme_ctrlr_get_cap, int, (struct spdk_nvme_ctrlr *ctrlr,
+		union spdk_nvme_cap_register *cap), 0);
+
+DEFINE_STUB(nvme_ctrlr_get_vs, int, (struct spdk_nvme_ctrlr *ctrlr,
+		union spdk_nvme_vs_register *vs), 0);
+
+DEFINE_STUB_V(nvme_ctrlr_init_cap, (struct spdk_nvme_ctrlr *ctrlr,
+		const union spdk_nvme_cap_register *cap, const union spdk_nvme_vs_register *vs));
+
+DEFINE_STUB(nvme_ctrlr_construct, int, (struct spdk_nvme_ctrlr *ctrlr), 0);
+
+DEFINE_STUB_V(nvme_ctrlr_destruct, (struct spdk_nvme_ctrlr *ctrlr));
+
+DEFINE_STUB(nvme_ctrlr_add_process, int, (struct spdk_nvme_ctrlr *ctrlr, void *devhandle), 0);
+
+DEFINE_STUB_V(nvme_ctrlr_connected, (struct spdk_nvme_ctrlr *ctrlr));
+
+DEFINE_STUB(nvme_ctrlr_cmd_identify, int, (struct spdk_nvme_ctrlr *ctrlr, uint8_t cns,
+		uint16_t cntid, uint32_t nsid, void *payload, size_t payload_size, spdk_nvme_cmd_cb cb_fn,
+		void *cb_arg), 0);
+
+DEFINE_STUB_V(spdk_nvme_ctrlr_get_default_ctrlr_opts, (struct spdk_nvme_ctrlr_opts *opts,
+		size_t opts_size));
+
+DEFINE_STUB_V(nvme_completion_poll_cb, (void *arg, const struct spdk_nvme_cpl *cpl));
+
+DEFINE_STUB(spdk_nvme_ctrlr_get_current_process, struct spdk_nvme_ctrlr_process *,
+	    (struct spdk_nvme_ctrlr *ctrlr), NULL);
+
+DEFINE_STUB(spdk_nvme_wait_for_completion, int, (struct spdk_nvme_qpair *qpair,
+		struct nvme_completion_poll_status *status), 0);
+
+DEFINE_STUB(spdk_nvme_wait_for_completion_robust_lock, int, (struct spdk_nvme_qpair *qpair,
+		struct nvme_completion_poll_status *status, pthread_mutex_t *robust_mutex), 0);
+
+/* Memory-map stubs: the real translation mock is spdk_mem_map_translate() below. */
+DEFINE_STUB(spdk_mem_map_set_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
+		uint64_t size, uint64_t translation), 0);
+
+DEFINE_STUB(spdk_mem_map_clear_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
+		uint64_t size), 0);
+
+DEFINE_STUB(spdk_mem_map_alloc, struct spdk_mem_map *, (uint64_t default_translation,
+		const struct spdk_mem_map_ops *ops, void *cb_ctx), NULL);
+
+DEFINE_STUB_V(spdk_mem_map_free, (struct spdk_mem_map **pmap));
+
+DEFINE_STUB(nvme_fabric_qpair_connect, int, (struct spdk_nvme_qpair *qpair, uint32_t num_entries),
+	    0);
+
+DEFINE_STUB(nvme_transport_ctrlr_set_reg_4, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
+		uint32_t value), 0);
+
+DEFINE_STUB(nvme_fabric_ctrlr_set_reg_4, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
+		uint32_t value), 0);
+
+DEFINE_STUB(nvme_fabric_ctrlr_set_reg_8, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
+		uint64_t value), 0);
+
+DEFINE_STUB(nvme_fabric_ctrlr_get_reg_4, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
+		uint32_t *value), 0);
+
+DEFINE_STUB(nvme_fabric_ctrlr_get_reg_8, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
+		uint64_t *value), 0);
+
+DEFINE_STUB_V(nvme_ctrlr_destruct_finish, (struct spdk_nvme_ctrlr *ctrlr));
+
+DEFINE_STUB(nvme_request_check_timeout, int, (struct nvme_request *req, uint16_t cid,
+		struct spdk_nvme_ctrlr_process *active_proc, uint64_t now_tick), 0);
+
+DEFINE_STUB(nvme_fabric_ctrlr_discover, int, (struct spdk_nvme_ctrlr *ctrlr, void *cb_ctx,
+		spdk_nvme_probe_cb probe_cb), 0);
+
+/* used to mock out having to split an SGL over a memory region */
+uint64_t g_mr_size;
+struct ibv_mr g_nvme_rdma_mr;
+
+/*
+ * Mock of spdk_mem_map_translate().
+ *
+ * When g_mr_size is non-zero it is reported through *size as the remaining
+ * bytes of the current registration, which forces the code under test to
+ * split (or reject) an SGL that crosses the region.  The returned
+ * "translation" is the address of the fake MR so the caller can read its
+ * rkey.
+ *
+ * Fix: the original wrote through a (uint32_t *) cast of the uint64_t size
+ * pointer, which updates only the low 32 bits of *size on little-endian
+ * targets (the wrong half elsewhere) and violates strict aliasing.  Store
+ * the full 64-bit value instead.
+ */
+uint64_t
+spdk_mem_map_translate(const struct spdk_mem_map *map, uint64_t vaddr, uint64_t *size)
+{
+	if (g_mr_size != 0) {
+		*size = g_mr_size;
+	}
+
+	return (uint64_t)&g_nvme_rdma_mr;
+}
+
+/* Fake bdev I/O context driven by the reset_sgl/next_sge callbacks below. */
+struct nvme_rdma_ut_bdev_io {
+	struct iovec iovs[NVME_RDMA_MAX_SGL_DESCRIPTORS];	/* scatter-gather table handed out one entry at a time */
+	int iovpos;						/* index of the next iov to hand out */
+};
+
+/* essentially a simplification of bdev_nvme_next_sge and bdev_nvme_reset_sgl */
+/*
+ * Reset the fake SGL iterator so the next nvme_rdma_ut_next_sge() call
+ * returns the iov that starts at byte `offset` of the payload.
+ *
+ * NOTE(review): this only supports offsets that fall exactly on an iov
+ * boundary (offset reaches 0 after whole-iov subtractions).  An offset in
+ * the middle of an iov would wrap the unsigned subtraction, walk off the
+ * end of the table, and trip the fatal assert — acceptable for a test
+ * helper, by design per the inline comment below.
+ */
+static void nvme_rdma_ut_reset_sgl(void *cb_arg, uint32_t offset)
+{
+	struct nvme_rdma_ut_bdev_io *bio = cb_arg;
+	struct iovec *iov;
+
+	for (bio->iovpos = 0; bio->iovpos < NVME_RDMA_MAX_SGL_DESCRIPTORS; bio->iovpos++) {
+		iov = &bio->iovs[bio->iovpos];
+		/* Only provide offsets at the beginning of an iov */
+		if (offset == 0) {
+			break;
+		}
+
+		offset -= iov->iov_len;
+	}
+
+	/* Guard: the requested offset must lie within the iov table. */
+	SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_RDMA_MAX_SGL_DESCRIPTORS);
+}
+
+/*
+ * Hand out the next iovec from the fake bdev I/O and advance the iterator.
+ * Always reports success (0); aborts the test via the fatal assert if the
+ * iterator has already consumed every descriptor.
+ */
+static int nvme_rdma_ut_next_sge(void *cb_arg, void **address, uint32_t *length)
+{
+	struct nvme_rdma_ut_bdev_io *io = cb_arg;
+	struct iovec *cur;
+
+	SPDK_CU_ASSERT_FATAL(io->iovpos < NVME_RDMA_MAX_SGL_DESCRIPTORS);
+
+	cur = &io->iovs[io->iovpos++];
+	*address = cur->iov_base;
+	*length = cur->iov_len;
+
+	return 0;
+}
+
+/*
+ * Exercise nvme_rdma_build_sgl_request() end to end through the fake
+ * reset_sgl/next_sge callbacks:
+ *   1. single-iov payload  -> keyed data block SGL inline in the command
+ *   2. 4-iov payload       -> last-segment descriptor list in the cmd buffer
+ *   3. iov crosses the (mocked) MR boundary -> request must be rejected
+ *   4. iov table smaller than payload       -> request must be rejected
+ *
+ * Fix: the multi-line CU_ASSERT after test case 2 was missing its
+ * terminating ';' and compiled only because the macro expands to a
+ * compound statement — add it.
+ */
+static void
+test_nvme_rdma_build_sgl_request(void)
+{
+	struct nvme_rdma_qpair rqpair;
+	struct spdk_nvme_ctrlr ctrlr = {0};
+	struct spdk_nvmf_cmd cmd = {{0}};
+	struct spdk_nvme_rdma_req rdma_req = {0};
+	struct nvme_request req = {{0}};
+	struct nvme_rdma_ut_bdev_io bio;
+	struct spdk_nvme_rdma_mr_map rmap = {0};
+	struct spdk_mem_map *map = NULL;
+	uint64_t i;
+	int rc;
+
+	rmap.map = map;
+
+	ctrlr.max_sges = NVME_RDMA_MAX_SGL_DESCRIPTORS;
+
+	rqpair.mr_map = &rmap;
+	rqpair.qpair.ctrlr = &ctrlr;
+	rqpair.cmds = &cmd;
+	cmd.sgl[0].address = 0x1111;
+
+	rdma_req.id = 0;
+	rdma_req.req = &req;
+
+	req.payload.reset_sgl_fn = nvme_rdma_ut_reset_sgl;
+	req.payload.next_sge_fn = nvme_rdma_ut_next_sge;
+	req.payload.contig_or_cb_arg = &bio;
+	req.qpair = &rqpair.qpair;
+
+	g_nvme_rdma_mr.rkey = 1;
+
+	/* Distinct iov_base values so the address assertions below are meaningful. */
+	for (i = 0; i < NVME_RDMA_MAX_SGL_DESCRIPTORS; i++) {
+		bio.iovs[i].iov_base = (void *)i;
+		bio.iovs[i].iov_len = 0;
+	}
+
+	/* Test case 1: single SGL. Expected: PASS */
+	bio.iovpos = 0;
+	req.payload_offset = 0;
+	req.payload_size = 0x1000;
+	bio.iovs[0].iov_len = 0x1000;
+	rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	CU_ASSERT(bio.iovpos == 1);
+	CU_ASSERT(req.cmd.dptr.sgl1.keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
+	CU_ASSERT(req.cmd.dptr.sgl1.keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
+	CU_ASSERT(req.cmd.dptr.sgl1.keyed.length == req.payload_size);
+	CU_ASSERT(req.cmd.dptr.sgl1.keyed.key == g_nvme_rdma_mr.rkey);
+	CU_ASSERT(req.cmd.dptr.sgl1.address == (uint64_t)bio.iovs[0].iov_base);
+	CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
+
+	/* Test case 2: multiple SGL. Expected: PASS */
+	bio.iovpos = 0;
+	req.payload_offset = 0;
+	req.payload_size = 0x4000;
+	for (i = 0; i < 4; i++) {
+		bio.iovs[i].iov_len = 0x1000;
+	}
+	rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
+	SPDK_CU_ASSERT_FATAL(rc == 0);
+	CU_ASSERT(bio.iovpos == 4);
+	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_LAST_SEGMENT);
+	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
+	CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 4 * sizeof(struct spdk_nvme_sgl_descriptor));
+	CU_ASSERT(req.cmd.dptr.sgl1.address == (uint64_t)0);
+	CU_ASSERT(rdma_req.send_sgl[0].length == 4 * sizeof(struct spdk_nvme_sgl_descriptor) + sizeof(
+			  struct spdk_nvme_cmd));
+	for (i = 0; i < 4; i++) {
+		CU_ASSERT(cmd.sgl[i].keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
+		CU_ASSERT(cmd.sgl[i].keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
+		CU_ASSERT(cmd.sgl[i].keyed.length == bio.iovs[i].iov_len);
+		CU_ASSERT(cmd.sgl[i].keyed.key == g_nvme_rdma_mr.rkey);
+		CU_ASSERT(cmd.sgl[i].address == (uint64_t)bio.iovs[i].iov_base);
+	}
+
+	/* Test case 3: Multiple SGL, SGL larger than mr size. Expected: FAIL
+	 * (payload_size is still 0x4000 from case 2; the 0x500-byte mocked MR
+	 * is exhausted inside the first 0x1000-byte iov.)
+	 */
+	bio.iovpos = 0;
+	req.payload_offset = 0;
+	g_mr_size = 0x500;
+	rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
+	SPDK_CU_ASSERT_FATAL(rc != 0);
+	CU_ASSERT(bio.iovpos == 1);
+
+	/* Test case 4: Multiple SGL, SGL size smaller than I/O size */
+	bio.iovpos = 0;
+	req.payload_offset = 0;
+	req.payload_size = 0x6000;
+	g_mr_size = 0x0;
+	rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
+	SPDK_CU_ASSERT_FATAL(rc != 0);
+	CU_ASSERT(bio.iovpos == NVME_RDMA_MAX_SGL_DESCRIPTORS);
+}
+
+/*
+ * Test runner: registers the single SGL-building test with CUnit, runs it
+ * in verbose mode, and returns the number of failed assertions as the
+ * process exit status (0 == success).
+ */
+int main(int argc, char **argv)
+{
+	unsigned int num_failures;
+	CU_pSuite suite;
+
+	if (CU_initialize_registry() != CUE_SUCCESS) {
+		return CU_get_error();
+	}
+
+	suite = CU_add_suite("nvme_rdma", NULL, NULL);
+	/* Short-circuit keeps CU_add_test from running on a NULL suite. */
+	if (suite == NULL ||
+	    CU_add_test(suite, "build_sgl_request", test_nvme_rdma_build_sgl_request) == NULL) {
+		CU_cleanup_registry();
+		return CU_get_error();
+	}
+
+	CU_basic_set_mode(CU_BRM_VERBOSE);
+	CU_basic_run_tests();
+	num_failures = CU_get_number_of_failures();
+	CU_cleanup_registry();
+	return num_failures;
+}