Diffstat (limited to 'src/spdk/test/unit/lib/nvmf')
-rw-r--r--  src/spdk/test/unit/lib/nvmf/Makefile                               |   48
-rw-r--r--  src/spdk/test/unit/lib/nvmf/ctrlr.c/.gitignore                     |    1
-rw-r--r--  src/spdk/test/unit/lib/nvmf/ctrlr.c/Makefile                       |   38
-rw-r--r--  src/spdk/test/unit/lib/nvmf/ctrlr.c/ctrlr_ut.c                     | 1711
-rw-r--r--  src/spdk/test/unit/lib/nvmf/ctrlr_bdev.c/.gitignore                |    1
-rw-r--r--  src/spdk/test/unit/lib/nvmf/ctrlr_bdev.c/Makefile                  |   38
-rw-r--r--  src/spdk/test/unit/lib/nvmf/ctrlr_bdev.c/ctrlr_bdev_ut.c           |  415
-rw-r--r--  src/spdk/test/unit/lib/nvmf/ctrlr_discovery.c/.gitignore           |    1
-rw-r--r--  src/spdk/test/unit/lib/nvmf/ctrlr_discovery.c/Makefile             |   39
-rw-r--r--  src/spdk/test/unit/lib/nvmf/ctrlr_discovery.c/ctrlr_discovery_ut.c |  303
-rw-r--r--  src/spdk/test/unit/lib/nvmf/fc.c/.gitignore                        |    1
-rw-r--r--  src/spdk/test/unit/lib/nvmf/fc.c/Makefile                          |   58
-rw-r--r--  src/spdk/test/unit/lib/nvmf/fc.c/fc_ut.c                           |  505
-rw-r--r--  src/spdk/test/unit/lib/nvmf/fc_ls.c/.gitignore                     |    1
-rw-r--r--  src/spdk/test/unit/lib/nvmf/fc_ls.c/Makefile                       |   45
-rw-r--r--  src/spdk/test/unit/lib/nvmf/fc_ls.c/fc_ls_ut.c                     | 1070
-rw-r--r--  src/spdk/test/unit/lib/nvmf/rdma.c/.gitignore                      |    1
-rw-r--r--  src/spdk/test/unit/lib/nvmf/rdma.c/Makefile                        |   38
-rw-r--r--  src/spdk/test/unit/lib/nvmf/rdma.c/rdma_ut.c                       | 1283
-rw-r--r--  src/spdk/test/unit/lib/nvmf/subsystem.c/.gitignore                 |    1
-rw-r--r--  src/spdk/test/unit/lib/nvmf/subsystem.c/Makefile                   |   39
-rw-r--r--  src/spdk/test/unit/lib/nvmf/subsystem.c/subsystem_ut.c             | 1342
-rw-r--r--  src/spdk/test/unit/lib/nvmf/tcp.c/.gitignore                       |    1
-rw-r--r--  src/spdk/test/unit/lib/nvmf/tcp.c/Makefile                         |   38
-rw-r--r--  src/spdk/test/unit/lib/nvmf/tcp.c/tcp_ut.c                         |  722
25 files changed, 7740 insertions, 0 deletions
diff --git a/src/spdk/test/unit/lib/nvmf/Makefile b/src/spdk/test/unit/lib/nvmf/Makefile
new file mode 100644
index 000000000..94d5dde63
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/Makefile
@@ -0,0 +1,48 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = tcp.c ctrlr.c subsystem.c ctrlr_discovery.c ctrlr_bdev.c
+
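+# Transport-specific suites are built only when the corresponding transport
+# was enabled at configure time.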
+DIRS-$(CONFIG_RDMA) += rdma.c
+
+DIRS-$(CONFIG_FC) += fc.c fc_ls.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/nvmf/ctrlr.c/.gitignore b/src/spdk/test/unit/lib/nvmf/ctrlr.c/.gitignore
new file mode 100644
index 000000000..65e849431
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/ctrlr.c/.gitignore
@@ -0,0 +1 @@
+ctrlr_ut
diff --git a/src/spdk/test/unit/lib/nvmf/ctrlr.c/Makefile b/src/spdk/test/unit/lib/nvmf/ctrlr.c/Makefile
new file mode 100644
index 000000000..c68c589ab
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/ctrlr.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
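+# spdk.unittest.mk compiles TEST_FILE into a standalone test binary
+# (ctrlr_ut, per the .gitignore above); by convention the directory is
+# named after the source file under test.
+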
+TEST_FILE = ctrlr_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvmf/ctrlr.c/ctrlr_ut.c b/src/spdk/test/unit/lib/nvmf/ctrlr.c/ctrlr_ut.c
new file mode 100644
index 000000000..1da8f9d54
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/ctrlr.c/ctrlr_ut.c
@@ -0,0 +1,1711 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation. All rights reserved.
+ * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+#include "spdk_internal/mock.h"
+#include "spdk_internal/thread.h"
+
+#include "common/lib/ut_multithread.c"
+#include "nvmf/ctrlr.c"
+
+SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF)
+
+struct spdk_bdev {
+ int ut_mock;
+ uint64_t blockcnt;
+};
+
+const char subsystem_default_sn[SPDK_NVME_CTRLR_SN_LEN + 1] = "subsys_default_sn";
+const char subsystem_default_mn[SPDK_NVME_CTRLR_MN_LEN + 1] = "subsys_default_mn";
+
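+/* DEFINE_STUB/DEFINE_STUB_V (from spdk_internal/mock.h) generate mock
+ * implementations that return the given default value; tests override a
+ * stub's return value at runtime with MOCK_SET and restore it with
+ * MOCK_CLEAR (see test_connect below).
+ */
+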
+DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
+ struct spdk_nvmf_subsystem *,
+ (struct spdk_nvmf_tgt *tgt, const char *subnqn),
+ NULL);
+
+DEFINE_STUB(spdk_nvmf_poll_group_create,
+ struct spdk_nvmf_poll_group *,
+ (struct spdk_nvmf_tgt *tgt),
+ NULL);
+
+DEFINE_STUB(spdk_nvmf_subsystem_get_sn,
+ const char *,
+ (const struct spdk_nvmf_subsystem *subsystem),
+ subsystem_default_sn);
+
+DEFINE_STUB(spdk_nvmf_subsystem_get_mn,
+ const char *,
+ (const struct spdk_nvmf_subsystem *subsystem),
+ subsystem_default_mn);
+
+DEFINE_STUB(spdk_nvmf_subsystem_get_first_ns,
+ struct spdk_nvmf_ns *,
+ (struct spdk_nvmf_subsystem *subsystem),
+ NULL);
+
+DEFINE_STUB(spdk_nvmf_subsystem_get_next_ns,
+ struct spdk_nvmf_ns *,
+ (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns),
+ NULL);
+
+DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
+ bool,
+ (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
+ true);
+
+DEFINE_STUB(nvmf_subsystem_add_ctrlr,
+ int,
+ (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
+ 0);
+
+DEFINE_STUB(nvmf_subsystem_get_ctrlr,
+ struct spdk_nvmf_ctrlr *,
+ (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
+ NULL);
+
+DEFINE_STUB(nvmf_ctrlr_dsm_supported,
+ bool,
+ (struct spdk_nvmf_ctrlr *ctrlr),
+ false);
+
+DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
+ bool,
+ (struct spdk_nvmf_ctrlr *ctrlr),
+ false);
+
+DEFINE_STUB_V(nvmf_get_discovery_log_page,
+ (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
+ uint32_t iovcnt, uint64_t offset, uint32_t length));
+
+DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
+ int,
+ (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
+ 0);
+
+DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
+ bool,
+ (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
+ true);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *req),
+ 0);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *req),
+ 0);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *req),
+ 0);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
+ 0);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *req),
+ 0);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *req),
+ 0);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *req),
+ 0);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *req),
+ 0);
+
+DEFINE_STUB(nvmf_transport_req_complete,
+ int,
+ (struct spdk_nvmf_request *req),
+ 0);
+
+DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));
+
+DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx, bool,
+ (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd,
+ struct spdk_dif_ctx *dif_ctx),
+ true);
+
+DEFINE_STUB_V(nvmf_transport_qpair_abort_request,
+ (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req));
+
+DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd));
+DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl));
+
+int
+spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
+{
+ return 0;
+}
+
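+/* Hand-written stub: unlike the DEFINE_STUB mocks above, this one needs real
+ * logic so that the Identify Namespace tests see the block count of the fake
+ * bdev. */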
+void
+nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
+ bool dif_insert_or_strip)
+{
+ uint64_t num_blocks;
+
+ SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
+ num_blocks = ns->bdev->blockcnt;
+ nsdata->nsze = num_blocks;
+ nsdata->ncap = num_blocks;
+ nsdata->nuse = num_blocks;
+ nsdata->nlbaf = 0;
+ nsdata->flbas.format = 0;
+ nsdata->lbaf[0].lbads = spdk_u32log2(512);
+}
+
+static void
+test_get_log_page(void)
+{
+ struct spdk_nvmf_subsystem subsystem = {};
+ struct spdk_nvmf_request req = {};
+ struct spdk_nvmf_qpair qpair = {};
+ struct spdk_nvmf_ctrlr ctrlr = {};
+ union nvmf_h2c_msg cmd = {};
+ union nvmf_c2h_msg rsp = {};
+ char data[4096];
+
+ subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
+
+ ctrlr.subsys = &subsystem;
+
+ qpair.ctrlr = &ctrlr;
+
+ req.qpair = &qpair;
+ req.cmd = &cmd;
+ req.rsp = &rsp;
+ req.data = &data;
+ req.length = sizeof(data);
+
+ /* Get Log Page - all valid */
+ memset(&cmd, 0, sizeof(cmd));
+ memset(&rsp, 0, sizeof(rsp));
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
+ cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
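+	/* numdl is the 0's based dword count, so req.length / 4 - 1 requests
+	 * exactly req.length bytes */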
+ cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = (req.length / 4 - 1);
+ CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
+
+ /* Get Log Page with invalid log ID */
+ memset(&cmd, 0, sizeof(cmd));
+ memset(&rsp, 0, sizeof(rsp));
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
+ cmd.nvme_cmd.cdw10 = 0;
+ CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
+
+ /* Get Log Page with invalid offset (not dword aligned) */
+ memset(&cmd, 0, sizeof(cmd));
+ memset(&rsp, 0, sizeof(rsp));
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
+ cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
+ cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = (req.length / 4 - 1);
+ cmd.nvme_cmd.cdw12 = 2;
+ CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
+
+ /* Get Log Page without data buffer */
+ memset(&cmd, 0, sizeof(cmd));
+ memset(&rsp, 0, sizeof(rsp));
+ req.data = NULL;
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
+ cmd.nvme_cmd.cdw10_bits.get_log_page.lid = SPDK_NVME_LOG_ERROR;
+ cmd.nvme_cmd.cdw10_bits.get_log_page.numdl = (req.length / 4 - 1);
+ CU_ASSERT(nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
+ req.data = data;
+}
+
+static void
+test_process_fabrics_cmd(void)
+{
+ struct spdk_nvmf_request req = {};
+ int ret;
+ struct spdk_nvmf_qpair req_qpair = {};
+ union nvmf_h2c_msg req_cmd = {};
+ union nvmf_c2h_msg req_rsp = {};
+
+ req.qpair = &req_qpair;
+ req.cmd = &req_cmd;
+ req.rsp = &req_rsp;
+ req.qpair->ctrlr = NULL;
+
+	/* Property Get without an established ctrlr: fabrics commands other than
+	 * Connect are rejected as a command sequence error */
+ req.cmd->nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET;
+ ret = nvmf_ctrlr_process_fabrics_cmd(&req);
+ CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
+ CU_ASSERT_EQUAL(ret, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+}
+
+static bool
+nvme_status_success(const struct spdk_nvme_status *status)
+{
+ return status->sct == SPDK_NVME_SCT_GENERIC && status->sc == SPDK_NVME_SC_SUCCESS;
+}
+
+static void
+test_connect(void)
+{
+ struct spdk_nvmf_fabric_connect_data connect_data;
+ struct spdk_nvmf_poll_group group;
+ struct spdk_nvmf_subsystem_poll_group *sgroups;
+ struct spdk_nvmf_transport transport;
+ struct spdk_nvmf_transport_ops tops = {};
+ struct spdk_nvmf_subsystem subsystem;
+ struct spdk_nvmf_request req;
+ struct spdk_nvmf_qpair admin_qpair;
+ struct spdk_nvmf_qpair qpair;
+ struct spdk_nvmf_qpair qpair2;
+ struct spdk_nvmf_ctrlr ctrlr;
+ struct spdk_nvmf_tgt tgt;
+ union nvmf_h2c_msg cmd;
+ union nvmf_c2h_msg rsp;
+ const uint8_t hostid[16] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
+ };
+ const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
+ const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
+ int rc;
+
+ memset(&group, 0, sizeof(group));
+ group.thread = spdk_get_thread();
+
+ memset(&ctrlr, 0, sizeof(ctrlr));
+ ctrlr.subsys = &subsystem;
+ ctrlr.qpair_mask = spdk_bit_array_create(3);
+ SPDK_CU_ASSERT_FATAL(ctrlr.qpair_mask != NULL);
+ ctrlr.vcprop.cc.bits.en = 1;
+ ctrlr.vcprop.cc.bits.iosqes = 6;
+ ctrlr.vcprop.cc.bits.iocqes = 4;
+
+ memset(&admin_qpair, 0, sizeof(admin_qpair));
+ admin_qpair.group = &group;
+
+ memset(&tgt, 0, sizeof(tgt));
+ memset(&transport, 0, sizeof(transport));
+ transport.ops = &tops;
+ transport.opts.max_aq_depth = 32;
+ transport.opts.max_queue_depth = 64;
+ transport.opts.max_qpairs_per_ctrlr = 3;
+ transport.tgt = &tgt;
+
+ memset(&qpair, 0, sizeof(qpair));
+ qpair.transport = &transport;
+ qpair.group = &group;
+ qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
+ TAILQ_INIT(&qpair.outstanding);
+
+ memset(&connect_data, 0, sizeof(connect_data));
+ memcpy(connect_data.hostid, hostid, sizeof(hostid));
+ connect_data.cntlid = 0xFFFF;
+ snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
+ snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);
+
+ memset(&subsystem, 0, sizeof(subsystem));
+ subsystem.thread = spdk_get_thread();
+ subsystem.id = 1;
+ TAILQ_INIT(&subsystem.ctrlrs);
+ subsystem.tgt = &tgt;
+ subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
+ subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
+ snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);
+
+ sgroups = calloc(subsystem.id + 1, sizeof(struct spdk_nvmf_subsystem_poll_group));
+ group.sgroups = sgroups;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
+ cmd.connect_cmd.cid = 1;
+ cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
+ cmd.connect_cmd.recfmt = 0;
+ cmd.connect_cmd.qid = 0;
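+	/* sqsize is 0's based: 31 requests 32 entries, matching max_aq_depth */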
+ cmd.connect_cmd.sqsize = 31;
+ cmd.connect_cmd.cattr = 0;
+ cmd.connect_cmd.kato = 120000;
+
+ memset(&req, 0, sizeof(req));
+ req.qpair = &qpair;
+ req.length = sizeof(connect_data);
+ req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
+ req.data = &connect_data;
+ req.cmd = &cmd;
+ req.rsp = &rsp;
+
+ MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);
+ MOCK_SET(spdk_nvmf_poll_group_create, &group);
+
+ /* Valid admin connect command */
+ memset(&rsp, 0, sizeof(rsp));
+ sgroups[subsystem.id].io_outstanding++;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
+ CU_ASSERT(qpair.ctrlr != NULL);
+ CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+ nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
+ spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
+ free(qpair.ctrlr);
+ qpair.ctrlr = NULL;
+
+ /* Valid admin connect command with kato = 0 */
+ cmd.connect_cmd.kato = 0;
+ memset(&rsp, 0, sizeof(rsp));
+ sgroups[subsystem.id].io_outstanding++;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
+ CU_ASSERT(qpair.ctrlr != NULL && qpair.ctrlr->keep_alive_poller == NULL);
+ CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+ spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
+ free(qpair.ctrlr);
+ qpair.ctrlr = NULL;
+ cmd.connect_cmd.kato = 120000;
+
+ /* Invalid data length */
+ memset(&rsp, 0, sizeof(rsp));
+ req.length = sizeof(connect_data) - 1;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ req.length = sizeof(connect_data);
+
+ /* Invalid recfmt */
+ memset(&rsp, 0, sizeof(rsp));
+ cmd.connect_cmd.recfmt = 1234;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ cmd.connect_cmd.recfmt = 0;
+
+ /* Subsystem not found */
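+	/* For invalid-parameter errors, iattr selects the SQE (0) or the CONNECT
+	 * data (1) and ipo gives the byte offset of the bad field; subnqn sits at
+	 * offset 256 of the CONNECT data, hostnqn at 512 */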
+ memset(&rsp, 0, sizeof(rsp));
+ MOCK_SET(spdk_nvmf_tgt_find_subsystem, NULL);
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 256);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);
+
+ /* Unterminated hostnqn */
+ memset(&rsp, 0, sizeof(rsp));
+ memset(connect_data.hostnqn, 'b', sizeof(connect_data.hostnqn));
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 512);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);
+
+ /* Host not allowed */
+ memset(&rsp, 0, sizeof(rsp));
+ MOCK_SET(spdk_nvmf_subsystem_host_allowed, false);
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_HOST);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ MOCK_SET(spdk_nvmf_subsystem_host_allowed, true);
+
+ /* Invalid sqsize == 0 */
+ memset(&rsp, 0, sizeof(rsp));
+ cmd.connect_cmd.sqsize = 0;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ cmd.connect_cmd.sqsize = 31;
+
+ /* Invalid admin sqsize > max_aq_depth */
+ memset(&rsp, 0, sizeof(rsp));
+ cmd.connect_cmd.sqsize = 32;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ cmd.connect_cmd.sqsize = 31;
+
+ /* Invalid I/O sqsize > max_queue_depth */
+ memset(&rsp, 0, sizeof(rsp));
+ cmd.connect_cmd.qid = 1;
+ cmd.connect_cmd.sqsize = 64;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ cmd.connect_cmd.qid = 0;
+ cmd.connect_cmd.sqsize = 31;
+
+ /* Invalid cntlid for admin queue */
+ memset(&rsp, 0, sizeof(rsp));
+ connect_data.cntlid = 0x1234;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ connect_data.cntlid = 0xFFFF;
+
+ ctrlr.admin_qpair = &admin_qpair;
+ ctrlr.subsys = &subsystem;
+
+ /* Valid I/O queue connect command */
+ memset(&rsp, 0, sizeof(rsp));
+ MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);
+ cmd.connect_cmd.qid = 1;
+ cmd.connect_cmd.sqsize = 63;
+ sgroups[subsystem.id].io_outstanding++;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
+ CU_ASSERT(qpair.ctrlr == &ctrlr);
+ CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+ qpair.ctrlr = NULL;
+ cmd.connect_cmd.sqsize = 31;
+
+ /* Non-existent controller */
+ memset(&rsp, 0, sizeof(rsp));
+ MOCK_SET(nvmf_subsystem_get_ctrlr, NULL);
+ sgroups[subsystem.id].io_outstanding++;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+ MOCK_SET(nvmf_subsystem_get_ctrlr, &ctrlr);
+
+ /* I/O connect to discovery controller */
+ memset(&rsp, 0, sizeof(rsp));
+ subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
+ subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
+ sgroups[subsystem.id].io_outstanding++;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+
+ /* I/O connect to discovery controller with keep-alive-timeout != 0 */
+ cmd.connect_cmd.qid = 0;
+ cmd.connect_cmd.kato = 120000;
+ memset(&rsp, 0, sizeof(rsp));
+ subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
+ subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
+ sgroups[subsystem.id].io_outstanding++;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
+ CU_ASSERT(qpair.ctrlr != NULL);
+ CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
+ CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+ nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
+ spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
+ free(qpair.ctrlr);
+ qpair.ctrlr = NULL;
+
+	/* I/O connect to discovery controller with keep-alive-timeout == 0.
+	 * The target substitutes a fixed default timeout so the keep-alive
+	 * poller still runs (verified below).
+	 */
+ cmd.connect_cmd.kato = 0;
+ memset(&rsp, 0, sizeof(rsp));
+ subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
+ subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
+ sgroups[subsystem.id].io_outstanding++;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
+ CU_ASSERT(qpair.ctrlr != NULL);
+ CU_ASSERT(qpair.ctrlr->keep_alive_poller != NULL);
+ CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+ nvmf_ctrlr_stop_keep_alive_timer(qpair.ctrlr);
+ spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
+ free(qpair.ctrlr);
+ qpair.ctrlr = NULL;
+ cmd.connect_cmd.qid = 1;
+ cmd.connect_cmd.kato = 120000;
+ subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
+
+ /* I/O connect to disabled controller */
+ memset(&rsp, 0, sizeof(rsp));
+ ctrlr.vcprop.cc.bits.en = 0;
+ sgroups[subsystem.id].io_outstanding++;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+ ctrlr.vcprop.cc.bits.en = 1;
+
+ /* I/O connect with invalid IOSQES */
+ memset(&rsp, 0, sizeof(rsp));
+ ctrlr.vcprop.cc.bits.iosqes = 3;
+ sgroups[subsystem.id].io_outstanding++;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+ ctrlr.vcprop.cc.bits.iosqes = 6;
+
+ /* I/O connect with invalid IOCQES */
+ memset(&rsp, 0, sizeof(rsp));
+ ctrlr.vcprop.cc.bits.iocqes = 3;
+ sgroups[subsystem.id].io_outstanding++;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+ ctrlr.vcprop.cc.bits.iocqes = 4;
+
+ /* I/O connect with too many existing qpairs */
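+	/* qpair_mask was created with 3 bits (max_qpairs_per_ctrlr); with all
+	 * three set there is no free queue ID left */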
+ memset(&rsp, 0, sizeof(rsp));
+ spdk_bit_array_set(ctrlr.qpair_mask, 0);
+ spdk_bit_array_set(ctrlr.qpair_mask, 1);
+ spdk_bit_array_set(ctrlr.qpair_mask, 2);
+ sgroups[subsystem.id].io_outstanding++;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+ spdk_bit_array_clear(ctrlr.qpair_mask, 0);
+ spdk_bit_array_clear(ctrlr.qpair_mask, 1);
+ spdk_bit_array_clear(ctrlr.qpair_mask, 2);
+
+ /* I/O connect with duplicate queue ID */
+ memset(&rsp, 0, sizeof(rsp));
+ memset(&qpair2, 0, sizeof(qpair2));
+ qpair2.group = &group;
+ qpair2.qid = 1;
+ spdk_bit_array_set(ctrlr.qpair_mask, 1);
+ cmd.connect_cmd.qid = 1;
+ sgroups[subsystem.id].io_outstanding++;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
+ rc = nvmf_ctrlr_cmd_connect(&req);
+ poll_threads();
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ CU_ASSERT(sgroups[subsystem.id].io_outstanding == 0);
+
+ /* Clean up globals */
+ MOCK_CLEAR(spdk_nvmf_tgt_find_subsystem);
+ MOCK_CLEAR(spdk_nvmf_poll_group_create);
+
+ spdk_bit_array_free(&ctrlr.qpair_mask);
+ free(sgroups);
+}
+
+static void
+test_get_ns_id_desc_list(void)
+{
+ struct spdk_nvmf_subsystem subsystem;
+ struct spdk_nvmf_qpair qpair;
+ struct spdk_nvmf_ctrlr ctrlr;
+ struct spdk_nvmf_request req;
+ struct spdk_nvmf_ns *ns_ptrs[1];
+ struct spdk_nvmf_ns ns;
+ union nvmf_h2c_msg cmd;
+ union nvmf_c2h_msg rsp;
+ struct spdk_bdev bdev;
+ uint8_t buf[4096];
+
+ memset(&subsystem, 0, sizeof(subsystem));
+ ns_ptrs[0] = &ns;
+ subsystem.ns = ns_ptrs;
+ subsystem.max_nsid = 1;
+ subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
+
+ memset(&ns, 0, sizeof(ns));
+ ns.opts.nsid = 1;
+ ns.bdev = &bdev;
+
+ memset(&qpair, 0, sizeof(qpair));
+ qpair.ctrlr = &ctrlr;
+
+ memset(&ctrlr, 0, sizeof(ctrlr));
+ ctrlr.subsys = &subsystem;
+ ctrlr.vcprop.cc.bits.en = 1;
+
+ memset(&req, 0, sizeof(req));
+ req.qpair = &qpair;
+ req.cmd = &cmd;
+ req.rsp = &rsp;
+ req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
+ req.data = buf;
+ req.length = sizeof(buf);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_IDENTIFY;
+ cmd.nvme_cmd.cdw10_bits.identify.cns = SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST;
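+	/* Each returned descriptor is a 4-byte header (NIDT, NIDL, two reserved
+	 * bytes) followed by NIDL bytes of ID data, hence the buf[] offsets below */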
+
+ /* Invalid NSID */
+ cmd.nvme_cmd.nsid = 0;
+ memset(&rsp, 0, sizeof(rsp));
+ CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
+
+ /* Valid NSID, but ns has no IDs defined */
+ cmd.nvme_cmd.nsid = 1;
+ memset(&rsp, 0, sizeof(rsp));
+ CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
+ CU_ASSERT(spdk_mem_all_zero(buf, sizeof(buf)));
+
+ /* Valid NSID, only EUI64 defined */
+ ns.opts.eui64[0] = 0x11;
+ ns.opts.eui64[7] = 0xFF;
+ memset(&rsp, 0, sizeof(rsp));
+ CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
+ CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
+ CU_ASSERT(buf[1] == 8);
+ CU_ASSERT(buf[4] == 0x11);
+ CU_ASSERT(buf[11] == 0xFF);
+ CU_ASSERT(buf[13] == 0);
+
+ /* Valid NSID, only NGUID defined */
+ memset(ns.opts.eui64, 0, sizeof(ns.opts.eui64));
+ ns.opts.nguid[0] = 0x22;
+ ns.opts.nguid[15] = 0xEE;
+ memset(&rsp, 0, sizeof(rsp));
+ CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
+ CU_ASSERT(buf[0] == SPDK_NVME_NIDT_NGUID);
+ CU_ASSERT(buf[1] == 16);
+ CU_ASSERT(buf[4] == 0x22);
+ CU_ASSERT(buf[19] == 0xEE);
+ CU_ASSERT(buf[21] == 0);
+
+ /* Valid NSID, both EUI64 and NGUID defined */
+ ns.opts.eui64[0] = 0x11;
+ ns.opts.eui64[7] = 0xFF;
+ ns.opts.nguid[0] = 0x22;
+ ns.opts.nguid[15] = 0xEE;
+ memset(&rsp, 0, sizeof(rsp));
+ CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
+ CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
+ CU_ASSERT(buf[1] == 8);
+ CU_ASSERT(buf[4] == 0x11);
+ CU_ASSERT(buf[11] == 0xFF);
+ CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
+ CU_ASSERT(buf[13] == 16);
+ CU_ASSERT(buf[16] == 0x22);
+ CU_ASSERT(buf[31] == 0xEE);
+ CU_ASSERT(buf[33] == 0);
+
+ /* Valid NSID, EUI64, NGUID, and UUID defined */
+ ns.opts.eui64[0] = 0x11;
+ ns.opts.eui64[7] = 0xFF;
+ ns.opts.nguid[0] = 0x22;
+ ns.opts.nguid[15] = 0xEE;
+ ns.opts.uuid.u.raw[0] = 0x33;
+ ns.opts.uuid.u.raw[15] = 0xDD;
+ memset(&rsp, 0, sizeof(rsp));
+ CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
+ CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
+ CU_ASSERT(buf[1] == 8);
+ CU_ASSERT(buf[4] == 0x11);
+ CU_ASSERT(buf[11] == 0xFF);
+ CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
+ CU_ASSERT(buf[13] == 16);
+ CU_ASSERT(buf[16] == 0x22);
+ CU_ASSERT(buf[31] == 0xEE);
+ CU_ASSERT(buf[32] == SPDK_NVME_NIDT_UUID);
+ CU_ASSERT(buf[33] == 16);
+ CU_ASSERT(buf[36] == 0x33);
+ CU_ASSERT(buf[51] == 0xDD);
+ CU_ASSERT(buf[53] == 0);
+}
+
+static void
+test_identify_ns(void)
+{
+ struct spdk_nvmf_subsystem subsystem = {};
+ struct spdk_nvmf_transport transport = {};
+ struct spdk_nvmf_qpair admin_qpair = { .transport = &transport};
+ struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
+ struct spdk_nvme_cmd cmd = {};
+ struct spdk_nvme_cpl rsp = {};
+ struct spdk_nvme_ns_data nsdata = {};
+ struct spdk_bdev bdev[3] = {{.blockcnt = 1234}, {.blockcnt = 0}, {.blockcnt = 5678}};
+ struct spdk_nvmf_ns ns[3] = {{.bdev = &bdev[0]}, {.bdev = NULL}, {.bdev = &bdev[2]}};
+ struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};
+
+ subsystem.ns = ns_arr;
+ subsystem.max_nsid = SPDK_COUNTOF(ns_arr);
+
+ /* Invalid NSID 0 */
+ cmd.nsid = 0;
+ memset(&nsdata, 0, sizeof(nsdata));
+ memset(&rsp, 0, sizeof(rsp));
+ CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
+ &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
+ CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
+
+ /* Valid NSID 1 */
+ cmd.nsid = 1;
+ memset(&nsdata, 0, sizeof(nsdata));
+ memset(&rsp, 0, sizeof(rsp));
+ CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
+ &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
+ CU_ASSERT(nsdata.nsze == 1234);
+
+ /* Valid but inactive NSID 2 */
+ cmd.nsid = 2;
+ memset(&nsdata, 0, sizeof(nsdata));
+ memset(&rsp, 0, sizeof(rsp));
+ CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
+ &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
+ CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
+
+ /* Valid NSID 3 */
+ cmd.nsid = 3;
+ memset(&nsdata, 0, sizeof(nsdata));
+ memset(&rsp, 0, sizeof(rsp));
+ CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
+ &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
+ CU_ASSERT(nsdata.nsze == 5678);
+
+ /* Invalid NSID 4 */
+ cmd.nsid = 4;
+ memset(&nsdata, 0, sizeof(nsdata));
+ memset(&rsp, 0, sizeof(rsp));
+ CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
+ &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
+ CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
+
+ /* Invalid NSID 0xFFFFFFFF (NS management not supported) */
+ cmd.nsid = 0xFFFFFFFF;
+ memset(&nsdata, 0, sizeof(nsdata));
+ memset(&rsp, 0, sizeof(rsp));
+ CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
+ &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
+ CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
+}
+
+static void
+test_set_get_features(void)
+{
+ struct spdk_nvmf_subsystem subsystem = {};
+ struct spdk_nvmf_qpair admin_qpair = {};
+ struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
+ union nvmf_h2c_msg cmd = {};
+ union nvmf_c2h_msg rsp = {};
+ struct spdk_nvmf_ns ns[3];
+	struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};
+ struct spdk_nvmf_request req;
+ int rc;
+
+ subsystem.ns = ns_arr;
+ subsystem.max_nsid = SPDK_COUNTOF(ns_arr);
+ admin_qpair.ctrlr = &ctrlr;
+ req.qpair = &admin_qpair;
+ cmd.nvme_cmd.nsid = 1;
+ req.cmd = &cmd;
+ req.rsp = &rsp;
+
+ /* Set SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
+ cmd.nvme_cmd.cdw11_bits.feat_rsv_persistence.bits.ptpl = 1;
+ ns[0].ptpl_file = "testcfg";
+ rc = nvmf_ctrlr_set_features_reservation_persistence(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_FEATURE_ID_NOT_SAVEABLE);
+ CU_ASSERT(ns[0].ptpl_activated == true);
+
+ /* Get SPDK_NVME_FEAT_HOST_RESERVE_PERSIST feature */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
+ cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_HOST_RESERVE_PERSIST;
+ rc = nvmf_ctrlr_get_features_reservation_persistence(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
+ CU_ASSERT(rsp.nvme_cpl.cdw0 == 1);
+
+ /* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
+ cmd.nvme_cmd.cdw11 = 0x42;
+ cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
+
+ rc = nvmf_ctrlr_get_features(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+
+ /* Get SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
+ cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */
+ cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
+
+ rc = nvmf_ctrlr_get_features(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
+
+ /* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - valid TMPSEL */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
+ cmd.nvme_cmd.cdw11 = 0x42;
+ cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
+
+ rc = nvmf_ctrlr_set_features(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+
+ /* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid TMPSEL */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
+ cmd.nvme_cmd.cdw11 = 0x42 | 1 << 16 | 1 << 19; /* Set reserved value */
+ cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
+
+ rc = nvmf_ctrlr_set_features(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
+
+ /* Set SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD - invalid THSEL */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
+ cmd.nvme_cmd.cdw11 = 0x42;
+ cmd.nvme_cmd.cdw11_bits.feat_temp_threshold.bits.thsel = 0x3; /* Set reserved value */
+ cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_TEMPERATURE_THRESHOLD;
+
+ rc = nvmf_ctrlr_set_features(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
+
+	/* Get SPDK_NVME_FEAT_ERROR_RECOVERY - generic */
+	cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_FEATURES;
+	cmd.nvme_cmd.cdw10_bits.get_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;
+
+ rc = nvmf_ctrlr_get_features(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+
+ /* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE set */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
+ cmd.nvme_cmd.cdw11 = 0x42;
+ cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x1;
+ cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;
+
+ rc = nvmf_ctrlr_set_features(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
+
+ /* Set SPDK_NVME_FEAT_ERROR_RECOVERY - DULBE cleared */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
+ cmd.nvme_cmd.cdw11 = 0x42;
+ cmd.nvme_cmd.cdw11_bits.feat_error_recovery.bits.dulbe = 0x0;
+ cmd.nvme_cmd.cdw10_bits.set_features.fid = SPDK_NVME_FEAT_ERROR_RECOVERY;
+
+ rc = nvmf_ctrlr_set_features(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+}
+
+/*
+ * Reservation Unit Test Configuration
+ * -------- -------- --------
+ * | Host A | | Host B | | Host C |
+ * -------- -------- --------
+ * / \ | |
+ * -------- -------- ------- -------
+ * |Ctrlr1_A| |Ctrlr2_A| |Ctrlr_B| |Ctrlr_C|
+ * -------- -------- ------- -------
+ * \ \ / /
+ * \ \ / /
+ * \ \ / /
+ * --------------------------------------
+ * | NAMESPACE 1 |
+ * --------------------------------------
+ */
+
+static struct spdk_nvmf_ctrlr g_ctrlr1_A, g_ctrlr2_A, g_ctrlr_B, g_ctrlr_C;
+struct spdk_nvmf_subsystem_pg_ns_info g_ns_info;
+
+static void
+ut_reservation_init(enum spdk_nvme_reservation_type rtype)
+{
+ /* Host A has two controllers */
+ spdk_uuid_generate(&g_ctrlr1_A.hostid);
+ spdk_uuid_copy(&g_ctrlr2_A.hostid, &g_ctrlr1_A.hostid);
+
+ /* Host B has 1 controller */
+ spdk_uuid_generate(&g_ctrlr_B.hostid);
+
+ /* Host C has 1 controller */
+ spdk_uuid_generate(&g_ctrlr_C.hostid);
+
+ memset(&g_ns_info, 0, sizeof(g_ns_info));
+ g_ns_info.rtype = rtype;
+ g_ns_info.reg_hostid[0] = g_ctrlr1_A.hostid;
+ g_ns_info.reg_hostid[1] = g_ctrlr_B.hostid;
+ g_ns_info.reg_hostid[2] = g_ctrlr_C.hostid;
+}
+
+static void
+test_reservation_write_exclusive(void)
+{
+ struct spdk_nvmf_request req = {};
+ union nvmf_h2c_msg cmd = {};
+ union nvmf_c2h_msg rsp = {};
+ int rc;
+
+ req.cmd = &cmd;
+ req.rsp = &rsp;
+
+ /* Host A holds reservation with type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE */
+ ut_reservation_init(SPDK_NVME_RESERVE_WRITE_EXCLUSIVE);
+ g_ns_info.holder_id = g_ctrlr1_A.hostid;
+
+ /* Test Case: Issue a Read command from Host A and Host B */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+
+ /* Test Case: Issue a DSM Write command from Host A and Host B */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
+ SPDK_CU_ASSERT_FATAL(rc < 0);
+ SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
+
+ /* Test Case: Issue a Write command from Host C */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
+ SPDK_CU_ASSERT_FATAL(rc < 0);
+ SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
+
+ /* Test Case: Issue a Read command from Host B */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+
+ /* Unregister Host C */
+ memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid));
+
+ /* Test Case: Read and Write commands from non-registrant Host C */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
+ SPDK_CU_ASSERT_FATAL(rc < 0);
+ SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+}
+
+static void
+test_reservation_exclusive_access(void)
+{
+ struct spdk_nvmf_request req = {};
+ union nvmf_h2c_msg cmd = {};
+ union nvmf_c2h_msg rsp = {};
+ int rc;
+
+ req.cmd = &cmd;
+ req.rsp = &rsp;
+
+ /* Host A holds reservation with type SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS */
+ ut_reservation_init(SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS);
+ g_ns_info.holder_id = g_ctrlr1_A.hostid;
+
+ /* Test Case: Issue a Read command from Host B */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
+ SPDK_CU_ASSERT_FATAL(rc < 0);
+ SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
+
+ /* Test Case: Issue a Reservation Release command from a valid Registrant */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_RESERVATION_RELEASE;
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+}
+
+static void
+_test_reservation_write_exclusive_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
+{
+ struct spdk_nvmf_request req = {};
+ union nvmf_h2c_msg cmd = {};
+ union nvmf_c2h_msg rsp = {};
+ int rc;
+
+ req.cmd = &cmd;
+ req.rsp = &rsp;
+
+ /* SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY and SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS */
+ ut_reservation_init(rtype);
+ g_ns_info.holder_id = g_ctrlr1_A.hostid;
+
+ /* Test Case: Issue a Read command from Host A and Host C */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+
+ /* Test Case: Issue a DSM Write command from Host A and Host C */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_DATASET_MANAGEMENT;
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr1_A, &req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+
+ /* Unregister Host C */
+ memset(&g_ns_info.reg_hostid[2], 0, sizeof(struct spdk_uuid));
+
+ /* Test Case: Read and Write commands from non-registrant Host C */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_C, &req);
+ SPDK_CU_ASSERT_FATAL(rc < 0);
+ SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
+}
+
+static void
+test_reservation_write_exclusive_regs_only_and_all_regs(void)
+{
+ _test_reservation_write_exclusive_regs_only_and_all_regs(
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
+ _test_reservation_write_exclusive_regs_only_and_all_regs(
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
+}
+
+static void
+_test_reservation_exclusive_access_regs_only_and_all_regs(enum spdk_nvme_reservation_type rtype)
+{
+ struct spdk_nvmf_request req = {};
+ union nvmf_h2c_msg cmd = {};
+ union nvmf_c2h_msg rsp = {};
+ int rc;
+
+ req.cmd = &cmd;
+ req.rsp = &rsp;
+
+ /* SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY and SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS */
+ ut_reservation_init(rtype);
+ g_ns_info.holder_id = g_ctrlr1_A.hostid;
+
+ /* Test Case: Issue a Write command from Host B */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+
+ /* Unregister Host B */
+ memset(&g_ns_info.reg_hostid[1], 0, sizeof(struct spdk_uuid));
+
+ /* Test Case: Issue a Read command from Host B */
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_READ;
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
+ SPDK_CU_ASSERT_FATAL(rc < 0);
+ SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_WRITE;
+ rc = nvmf_ns_reservation_request_check(&g_ns_info, &g_ctrlr_B, &req);
+ SPDK_CU_ASSERT_FATAL(rc < 0);
+ SPDK_CU_ASSERT_FATAL(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
+}
+
+static void
+test_reservation_exclusive_access_regs_only_and_all_regs(void)
+{
+ _test_reservation_exclusive_access_regs_only_and_all_regs(
+ SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY);
+ _test_reservation_exclusive_access_regs_only_and_all_regs(
+ SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_ALL_REGS);
+}
+
+static void
+test_reservation_notification_log_page(void)
+{
+ struct spdk_nvmf_ctrlr ctrlr;
+ struct spdk_nvmf_qpair qpair;
+ struct spdk_nvmf_ns ns;
+ struct spdk_nvmf_request req;
+ union nvmf_h2c_msg cmd = {};
+ union nvmf_c2h_msg rsp = {};
+ union spdk_nvme_async_event_completion event = {};
+ struct spdk_nvme_reservation_notification_log logs[3];
+
+ memset(&ctrlr, 0, sizeof(ctrlr));
+ ctrlr.thread = spdk_get_thread();
+ TAILQ_INIT(&ctrlr.log_head);
+ ns.nsid = 1;
+
+ /* Test Case: Mask all the reservation notifications */
+ ns.mask = SPDK_NVME_REGISTRATION_PREEMPTED_MASK |
+ SPDK_NVME_RESERVATION_RELEASED_MASK |
+ SPDK_NVME_RESERVATION_PREEMPTED_MASK;
+ nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
+ SPDK_NVME_REGISTRATION_PREEMPTED);
+ nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
+ SPDK_NVME_RESERVATION_RELEASED);
+ nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
+ SPDK_NVME_RESERVATION_PREEMPTED);
+ poll_threads();
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&ctrlr.log_head));
+
+ /* Test Case: Unmask all the reservation notifications,
+ * 3 log pages are generated, and AER was triggered.
+ */
+ ns.mask = 0;
+ ctrlr.num_avail_log_pages = 0;
+ req.cmd = &cmd;
+ req.rsp = &rsp;
+ ctrlr.aer_req[0] = &req;
+ ctrlr.nr_aer_reqs = 1;
+ req.qpair = &qpair;
+ TAILQ_INIT(&qpair.outstanding);
+ qpair.ctrlr = NULL;
+ qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req, link);
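+
+	/* The parked AER request is completed once an unmasked notice is logged;
+	 * its cdw0 carries the async event checked below */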
+
+ nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
+ SPDK_NVME_REGISTRATION_PREEMPTED);
+ nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
+ SPDK_NVME_RESERVATION_RELEASED);
+ nvmf_ctrlr_reservation_notice_log(&ctrlr, &ns,
+ SPDK_NVME_RESERVATION_PREEMPTED);
+ poll_threads();
+ event.raw = rsp.nvme_cpl.cdw0;
+ SPDK_CU_ASSERT_FATAL(event.bits.async_event_type == SPDK_NVME_ASYNC_EVENT_TYPE_IO);
+ SPDK_CU_ASSERT_FATAL(event.bits.async_event_info == SPDK_NVME_ASYNC_EVENT_RESERVATION_LOG_AVAIL);
+ SPDK_CU_ASSERT_FATAL(event.bits.log_page_identifier == SPDK_NVME_LOG_RESERVATION_NOTIFICATION);
+ SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 3);
+
+ /* Test Case: Get Log Page to clear the log pages */
+ nvmf_get_reservation_notification_log_page(&ctrlr, (void *)logs, 0, sizeof(logs));
+ SPDK_CU_ASSERT_FATAL(ctrlr.num_avail_log_pages == 0);
+}
+
+static void
+test_get_dif_ctx(void)
+{
+ struct spdk_nvmf_subsystem subsystem = {};
+ struct spdk_nvmf_request req = {};
+ struct spdk_nvmf_qpair qpair = {};
+ struct spdk_nvmf_ctrlr ctrlr = {};
+ struct spdk_nvmf_ns ns = {};
+ struct spdk_nvmf_ns *_ns = NULL;
+ struct spdk_bdev bdev = {};
+ union nvmf_h2c_msg cmd = {};
+ struct spdk_dif_ctx dif_ctx = {};
+ bool ret;
+
+ ctrlr.subsys = &subsystem;
+
+ qpair.ctrlr = &ctrlr;
+
+ req.qpair = &qpair;
+ req.cmd = &cmd;
+
+ ns.bdev = &bdev;
+
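+	/* spdk_nvmf_request_get_dif_ctx() returns false on a chain of guards;
+	 * each step below satisfies one more guard until the final call succeeds */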
+ ctrlr.dif_insert_or_strip = false;
+
+ ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
+ CU_ASSERT(ret == false);
+
+ ctrlr.dif_insert_or_strip = true;
+ qpair.state = SPDK_NVMF_QPAIR_UNINITIALIZED;
+
+ ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
+ CU_ASSERT(ret == false);
+
+ qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
+ cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FABRIC;
+
+ ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
+ CU_ASSERT(ret == false);
+
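+	/* Non-fabrics opcode, but the qpair is still the admin queue (qid 0) */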
+ cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_FLUSH;
+
+ ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
+ CU_ASSERT(ret == false);
+
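+	/* I/O queue now, but nsid 0 is not a valid namespace */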
+ qpair.qid = 1;
+
+ ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
+ CU_ASSERT(ret == false);
+
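+	/* Plausible nsid, but the subsystem has no namespace attached yet */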
+ cmd.nvme_cmd.nsid = 1;
+
+ ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
+ CU_ASSERT(ret == false);
+
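+	/* Namespace attached, but FLUSH transfers no block data */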
+ subsystem.max_nsid = 1;
+ subsystem.ns = &_ns;
+ subsystem.ns[0] = &ns;
+
+ ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
+ CU_ASSERT(ret == false);
+
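+	/* A WRITE to the namespace finally yields a DIF context */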
+ cmd.nvmf_cmd.opcode = SPDK_NVME_OPC_WRITE;
+
+ ret = spdk_nvmf_request_get_dif_ctx(&req, &dif_ctx);
+ CU_ASSERT(ret == true);
+}
+
+static void
+test_identify_ctrlr(void)
+{
+ struct spdk_nvmf_subsystem subsystem = {
+ .subtype = SPDK_NVMF_SUBTYPE_NVME
+ };
+ struct spdk_nvmf_transport_ops tops = {};
+ struct spdk_nvmf_transport transport = {
+ .ops = &tops,
+ .opts = {
+ .in_capsule_data_size = 4096,
+ },
+ };
+ struct spdk_nvmf_qpair admin_qpair = { .transport = &transport};
+ struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
+ struct spdk_nvme_ctrlr_data cdata = {};
+ uint32_t expected_ioccsz;
+
+ nvmf_ctrlr_cdata_init(&transport, &subsystem, &ctrlr.cdata);
+
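+	/* ioccsz is reported in 16-byte units: one 64-byte SQE plus the in-capsule data area */
+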
+ /* Check ioccsz, TCP transport */
+ tops.type = SPDK_NVME_TRANSPORT_TCP;
+ expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
+ CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
+
+ /* Check ioccsz, RDMA transport */
+ tops.type = SPDK_NVME_TRANSPORT_RDMA;
+ expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
+ CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
+
+ /* Check ioccsz, TCP transport with dif_insert_or_strip */
+ tops.type = SPDK_NVME_TRANSPORT_TCP;
+ ctrlr.dif_insert_or_strip = true;
+ expected_ioccsz = sizeof(struct spdk_nvme_cmd) / 16 + transport.opts.in_capsule_data_size / 16;
+ CU_ASSERT(spdk_nvmf_ctrlr_identify_ctrlr(&ctrlr, &cdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(cdata.nvmf_specific.ioccsz == expected_ioccsz);
+}
+
+static int
+custom_admin_cmd_hdlr(struct spdk_nvmf_request *req)
+{
+ req->rsp->nvme_cpl.status.sc = SPDK_NVME_SC_SUCCESS;
+
+ return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
+}
+
+static void
+test_custom_admin_cmd(void)
+{
+ struct spdk_nvmf_subsystem subsystem;
+ struct spdk_nvmf_qpair qpair;
+ struct spdk_nvmf_ctrlr ctrlr;
+ struct spdk_nvmf_request req;
+ struct spdk_nvmf_ns *ns_ptrs[1];
+ struct spdk_nvmf_ns ns;
+ union nvmf_h2c_msg cmd;
+ union nvmf_c2h_msg rsp;
+ struct spdk_bdev bdev;
+ uint8_t buf[4096];
+ int rc;
+
+ memset(&subsystem, 0, sizeof(subsystem));
+ ns_ptrs[0] = &ns;
+ subsystem.ns = ns_ptrs;
+ subsystem.max_nsid = 1;
+ subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
+
+ memset(&ns, 0, sizeof(ns));
+ ns.opts.nsid = 1;
+ ns.bdev = &bdev;
+
+ memset(&qpair, 0, sizeof(qpair));
+ qpair.ctrlr = &ctrlr;
+
+ memset(&ctrlr, 0, sizeof(ctrlr));
+ ctrlr.subsys = &subsystem;
+ ctrlr.vcprop.cc.bits.en = 1;
+
+ memset(&req, 0, sizeof(req));
+ req.qpair = &qpair;
+ req.cmd = &cmd;
+ req.rsp = &rsp;
+ req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
+ req.data = buf;
+ req.length = sizeof(buf);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.nvme_cmd.opc = 0xc1;
+ cmd.nvme_cmd.nsid = 0;
+ memset(&rsp, 0, sizeof(rsp));
+
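+	/* Register our handler for the vendor-specific admin opcode 0xc1 */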
+ spdk_nvmf_set_custom_admin_cmd_hdlr(cmd.nvme_cmd.opc, custom_admin_cmd_hdlr);
+
+ /* Ensure that our hdlr is being called */
+ rc = nvmf_ctrlr_process_admin_cmd(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
+}
+
+static void
+test_fused_compare_and_write(void)
+{
+ struct spdk_nvmf_request req = {};
+ struct spdk_nvmf_qpair qpair = {};
+ struct spdk_nvme_cmd cmd = {};
+ union nvmf_c2h_msg rsp = {};
+ struct spdk_nvmf_ctrlr ctrlr = {};
+ struct spdk_nvmf_subsystem subsystem = {};
+ struct spdk_nvmf_ns ns = {};
+ struct spdk_nvmf_ns *subsys_ns[1] = {};
+ struct spdk_bdev bdev = {};
+
+ struct spdk_nvmf_poll_group group = {};
+ struct spdk_nvmf_subsystem_poll_group sgroups = {};
+ struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
+
+ ns.bdev = &bdev;
+
+ subsystem.id = 0;
+ subsystem.max_nsid = 1;
+ subsys_ns[0] = &ns;
+ subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
+
+ /* Enable controller */
+ ctrlr.vcprop.cc.bits.en = 1;
+ ctrlr.subsys = (struct spdk_nvmf_subsystem *)&subsystem;
+
+ group.num_sgroups = 1;
+ sgroups.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
+ sgroups.num_ns = 1;
+ sgroups.ns_info = &ns_info;
+ TAILQ_INIT(&sgroups.queued);
+ group.sgroups = &sgroups;
+ TAILQ_INIT(&qpair.outstanding);
+
+ qpair.ctrlr = &ctrlr;
+ qpair.group = &group;
+ qpair.qid = 1;
+ qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
+
+ cmd.nsid = 1;
+
+ req.qpair = &qpair;
+ req.cmd = (union nvmf_h2c_msg *)&cmd;
+ req.rsp = &rsp;
+
+	/* SUCCESS/SUCCESS: a valid fused pair - COMPARE as first, WRITE as second */
+ cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
+ cmd.opc = SPDK_NVME_OPC_COMPARE;
+
+ spdk_nvmf_request_exec(&req);
+ CU_ASSERT(qpair.first_fused_req != NULL);
+ CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
+
+ cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
+ cmd.opc = SPDK_NVME_OPC_WRITE;
+
+ spdk_nvmf_request_exec(&req);
+ CU_ASSERT(qpair.first_fused_req == NULL);
+ CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
+
+	/* Wrong sequence: FUSE_SECOND arrives without a preceding FUSE_FIRST */
+ cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
+ cmd.opc = SPDK_NVME_OPC_WRITE;
+
+ spdk_nvmf_request_exec(&req);
+ CU_ASSERT(!nvme_status_success(&rsp.nvme_cpl.status));
+ CU_ASSERT(qpair.first_fused_req == NULL);
+
+ /* Write as FUSE_FIRST (Wrong op code) */
+ cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
+ cmd.opc = SPDK_NVME_OPC_WRITE;
+
+ spdk_nvmf_request_exec(&req);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
+ CU_ASSERT(qpair.first_fused_req == NULL);
+
+	/* Compare as FUSE_SECOND (wrong op code): first queue a valid FUSE_FIRST compare */
+ cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
+ cmd.opc = SPDK_NVME_OPC_COMPARE;
+
+ spdk_nvmf_request_exec(&req);
+ CU_ASSERT(qpair.first_fused_req != NULL);
+ CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
+
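+	/* ...then send COMPARE as the second fused command, which must be a WRITE */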
+ cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
+ cmd.opc = SPDK_NVME_OPC_COMPARE;
+
+ spdk_nvmf_request_exec(&req);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_OPCODE);
+ CU_ASSERT(qpair.first_fused_req == NULL);
+}
+
+static void
+test_multi_async_event_reqs(void)
+{
+ struct spdk_nvmf_subsystem subsystem = {};
+ struct spdk_nvmf_qpair qpair = {};
+ struct spdk_nvmf_ctrlr ctrlr = {};
+ struct spdk_nvmf_request req[5] = {};
+ struct spdk_nvmf_ns *ns_ptrs[1] = {};
+ struct spdk_nvmf_ns ns = {};
+ union nvmf_h2c_msg cmd[5] = {};
+ union nvmf_c2h_msg rsp[5] = {};
+
+ struct spdk_nvmf_poll_group group = {};
+ struct spdk_nvmf_subsystem_poll_group sgroups = {};
+
+ int i;
+
+ ns_ptrs[0] = &ns;
+ subsystem.ns = ns_ptrs;
+ subsystem.max_nsid = 1;
+ subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
+
+ ns.opts.nsid = 1;
+ group.sgroups = &sgroups;
+
+ qpair.ctrlr = &ctrlr;
+ qpair.group = &group;
+ TAILQ_INIT(&qpair.outstanding);
+
+ ctrlr.subsys = &subsystem;
+ ctrlr.vcprop.cc.bits.en = 1;
+
+ for (i = 0; i < 5; i++) {
+ cmd[i].nvme_cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
+ cmd[i].nvme_cmd.nsid = 1;
+ cmd[i].nvme_cmd.cid = i;
+
+ req[i].qpair = &qpair;
+ req[i].cmd = &cmd[i];
+ req[i].rsp = &rsp[i];
+ TAILQ_INSERT_TAIL(&qpair.outstanding, &req[i], link);
+ }
+
+ /* Target can store NVMF_MAX_ASYNC_EVENTS reqs */
+ sgroups.io_outstanding = NVMF_MAX_ASYNC_EVENTS;
+ for (i = 0; i < NVMF_MAX_ASYNC_EVENTS; i++) {
+ CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[i]) == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(ctrlr.nr_aer_reqs == i + 1);
+ }
+ CU_ASSERT(sgroups.io_outstanding == 0);
+
+	/* Exceeding NVMF_MAX_ASYNC_EVENTS reports an error */
+	CU_ASSERT(nvmf_ctrlr_process_admin_cmd(&req[4]) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+	CU_ASSERT(ctrlr.nr_aer_reqs == NVMF_MAX_ASYNC_EVENTS);
+	CU_ASSERT(rsp[4].nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+	CU_ASSERT(rsp[4].nvme_cpl.status.sc == SPDK_NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED);
+
+	/* Test that the aer_req array stays contiguous when a request in the middle is aborted */
+ CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 2) == true);
+ CU_ASSERT(ctrlr.aer_req[0] == &req[0]);
+ CU_ASSERT(ctrlr.aer_req[1] == &req[1]);
+ CU_ASSERT(ctrlr.aer_req[2] == &req[3]);
+
+ CU_ASSERT(nvmf_qpair_abort_aer(&qpair, 3) == true);
+ CU_ASSERT(ctrlr.aer_req[0] == &req[0]);
+ CU_ASSERT(ctrlr.aer_req[1] == &req[1]);
+ CU_ASSERT(ctrlr.aer_req[2] == NULL);
+ CU_ASSERT(ctrlr.nr_aer_reqs == 2);
+
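+	/* Detach the two remaining AER requests from the qpair's outstanding list */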
+ TAILQ_REMOVE(&qpair.outstanding, &req[0], link);
+ TAILQ_REMOVE(&qpair.outstanding, &req[1], link);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvmf", NULL, NULL);
+ CU_ADD_TEST(suite, test_get_log_page);
+ CU_ADD_TEST(suite, test_process_fabrics_cmd);
+ CU_ADD_TEST(suite, test_connect);
+ CU_ADD_TEST(suite, test_get_ns_id_desc_list);
+ CU_ADD_TEST(suite, test_identify_ns);
+ CU_ADD_TEST(suite, test_reservation_write_exclusive);
+ CU_ADD_TEST(suite, test_reservation_exclusive_access);
+ CU_ADD_TEST(suite, test_reservation_write_exclusive_regs_only_and_all_regs);
+ CU_ADD_TEST(suite, test_reservation_exclusive_access_regs_only_and_all_regs);
+ CU_ADD_TEST(suite, test_reservation_notification_log_page);
+ CU_ADD_TEST(suite, test_get_dif_ctx);
+ CU_ADD_TEST(suite, test_set_get_features);
+ CU_ADD_TEST(suite, test_identify_ctrlr);
+ CU_ADD_TEST(suite, test_custom_admin_cmd);
+ CU_ADD_TEST(suite, test_fused_compare_and_write);
+ CU_ADD_TEST(suite, test_multi_async_event_reqs);
+
+ allocate_threads(1);
+ set_thread(0);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ free_threads();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvmf/ctrlr_bdev.c/.gitignore b/src/spdk/test/unit/lib/nvmf/ctrlr_bdev.c/.gitignore
new file mode 100644
index 000000000..78fca1017
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/ctrlr_bdev.c/.gitignore
@@ -0,0 +1 @@
+ctrlr_bdev_ut
diff --git a/src/spdk/test/unit/lib/nvmf/ctrlr_bdev.c/Makefile b/src/spdk/test/unit/lib/nvmf/ctrlr_bdev.c/Makefile
new file mode 100644
index 000000000..1d22f14be
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/ctrlr_bdev.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = ctrlr_bdev_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvmf/ctrlr_bdev.c/ctrlr_bdev_ut.c b/src/spdk/test/unit/lib/nvmf/ctrlr_bdev.c/ctrlr_bdev_ut.c
new file mode 100644
index 000000000..0df9c983b
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/ctrlr_bdev.c/ctrlr_bdev_ut.c
@@ -0,0 +1,415 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "spdk_internal/mock.h"
+
+#include "nvmf/ctrlr_bdev.c"
+
+
+SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF)
+
+DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req), -1);
+
+DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), "test");
+
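+/* Minimal definition of struct spdk_bdev - only the fields these tests touch */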
+struct spdk_bdev {
+ uint32_t blocklen;
+ uint64_t num_blocks;
+ uint32_t md_len;
+};
+
+uint32_t
+spdk_bdev_get_block_size(const struct spdk_bdev *bdev)
+{
+ return bdev->blocklen;
+}
+
+uint64_t
+spdk_bdev_get_num_blocks(const struct spdk_bdev *bdev)
+{
+ return bdev->num_blocks;
+}
+
+uint32_t
+spdk_bdev_get_optimal_io_boundary(const struct spdk_bdev *bdev)
+{
+ abort();
+ return 0;
+}
+
+uint32_t
+spdk_bdev_get_md_size(const struct spdk_bdev *bdev)
+{
+ return bdev->md_len;
+}
+
+DEFINE_STUB(spdk_bdev_comparev_and_writev_blocks, int,
+ (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *compare_iov, int compare_iovcnt,
+ struct iovec *write_iov, int write_iovcnt,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg),
+ 0);
+
+DEFINE_STUB(nvmf_ctrlr_process_io_cmd, int, (struct spdk_nvmf_request *req), 0);
+
+DEFINE_STUB_V(spdk_bdev_io_get_nvme_fused_status, (const struct spdk_bdev_io *bdev_io,
+ uint32_t *cdw0, int *cmp_sct, int *cmp_sc, int *wr_sct, int *wr_sc));
+
+DEFINE_STUB(spdk_bdev_is_md_interleaved, bool, (const struct spdk_bdev *bdev), false);
+
+DEFINE_STUB(spdk_bdev_get_dif_type, enum spdk_dif_type,
+ (const struct spdk_bdev *bdev), SPDK_DIF_DISABLE);
+
+DEFINE_STUB(spdk_bdev_is_dif_head_of_md, bool, (const struct spdk_bdev *bdev), false);
+
+DEFINE_STUB(spdk_bdev_is_dif_check_enabled, bool,
+ (const struct spdk_bdev *bdev, enum spdk_dif_check_type check_type), false);
+
+DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *,
+ (struct spdk_bdev_desc *desc), NULL);
+
+DEFINE_STUB(spdk_bdev_flush_blocks, int,
+ (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg),
+ 0);
+
+DEFINE_STUB(spdk_bdev_unmap_blocks, int,
+ (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg),
+ 0);
+
+DEFINE_STUB(spdk_bdev_io_type_supported, bool,
+ (struct spdk_bdev *bdev, enum spdk_bdev_io_type io_type), false);
+
+DEFINE_STUB(spdk_bdev_queue_io_wait, int,
+ (struct spdk_bdev *bdev, struct spdk_io_channel *ch,
+ struct spdk_bdev_io_wait_entry *entry),
+ 0);
+
+DEFINE_STUB(spdk_bdev_write_blocks, int,
+ (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, void *buf,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg),
+ 0);
+
+DEFINE_STUB(spdk_bdev_writev_blocks, int,
+ (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg),
+ 0);
+
+DEFINE_STUB(spdk_bdev_read_blocks, int,
+ (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, void *buf,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg),
+ 0);
+
+DEFINE_STUB(spdk_bdev_readv_blocks, int,
+ (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg),
+ 0);
+
+DEFINE_STUB(spdk_bdev_write_zeroes_blocks, int,
+ (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg),
+ 0);
+
+DEFINE_STUB(spdk_bdev_nvme_io_passthru, int,
+ (struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ const struct spdk_nvme_cmd *cmd, void *buf, size_t nbytes,
+ spdk_bdev_io_completion_cb cb, void *cb_arg),
+ 0);
+
+DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));
+
+DEFINE_STUB(spdk_nvmf_subsystem_get_nqn, const char *,
+ (const struct spdk_nvmf_subsystem *subsystem), NULL);
+
+struct spdk_nvmf_ns *
+spdk_nvmf_subsystem_get_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
+{
+ abort();
+ return NULL;
+}
+
+struct spdk_nvmf_ns *
+spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem)
+{
+ abort();
+ return NULL;
+}
+
+struct spdk_nvmf_ns *
+spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns)
+{
+ abort();
+ return NULL;
+}
+
+DEFINE_STUB_V(spdk_bdev_io_get_nvme_status,
+ (const struct spdk_bdev_io *bdev_io, uint32_t *cdw0, int *sct, int *sc));
+
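+/* Stub of spdk_dif_ctx_init that records only the fields asserted on below */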
+int
+spdk_dif_ctx_init(struct spdk_dif_ctx *ctx, uint32_t block_size, uint32_t md_size,
+ bool md_interleave, bool dif_loc, enum spdk_dif_type dif_type, uint32_t dif_flags,
+ uint32_t init_ref_tag, uint16_t apptag_mask, uint16_t app_tag,
+ uint32_t data_offset, uint16_t guard_seed)
+{
+ ctx->block_size = block_size;
+ ctx->md_size = md_size;
+ ctx->init_ref_tag = init_ref_tag;
+
+ return 0;
+}
+
+static void
+test_get_rw_params(void)
+{
+ struct spdk_nvme_cmd cmd = {0};
+ uint64_t lba;
+ uint64_t count;
+
+ lba = 0;
+ count = 0;
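+	/* SLBA occupies CDW10-CDW11; NLB is the low 16 bits of CDW12 (0's based) */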
+ to_le64(&cmd.cdw10, 0x1234567890ABCDEF);
+ to_le32(&cmd.cdw12, 0x9875 | SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS);
+ nvmf_bdev_ctrlr_get_rw_params(&cmd, &lba, &count);
+ CU_ASSERT(lba == 0x1234567890ABCDEF);
+ CU_ASSERT(count == 0x9875 + 1); /* NOTE: this field is 0's based, hence the +1 */
+}
+
+static void
+test_lba_in_range(void)
+{
+ /* Trivial cases (no overflow) */
+ CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 0, 1) == true);
+ CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 0, 1000) == true);
+ CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 0, 1001) == false);
+ CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1, 999) == true);
+ CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1, 1000) == false);
+ CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 999, 1) == true);
+ CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1000, 1) == false);
+ CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1001, 1) == false);
+
+ /* Overflow edge cases */
+ CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, 0, UINT64_MAX) == true);
+ CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, 1, UINT64_MAX) == false);
+ CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, UINT64_MAX - 1, 1) == true);
+ CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, UINT64_MAX, 1) == false);
+}
+
+static void
+test_get_dif_ctx(void)
+{
+ struct spdk_bdev bdev = {};
+ struct spdk_nvme_cmd cmd = {};
+ struct spdk_dif_ctx dif_ctx = {};
+ bool ret;
+
+ bdev.md_len = 0;
+
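+	/* No metadata on the bdev, so no DIF context can be built */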
+ ret = nvmf_bdev_ctrlr_get_dif_ctx(&bdev, &cmd, &dif_ctx);
+ CU_ASSERT(ret == false);
+
+ to_le64(&cmd.cdw10, 0x1234567890ABCDEF);
+ bdev.blocklen = 520;
+ bdev.md_len = 8;
+
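+	/* 512+8 format: init_ref_tag should come from the low 32 bits of the SLBA */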
+ ret = nvmf_bdev_ctrlr_get_dif_ctx(&bdev, &cmd, &dif_ctx);
+ CU_ASSERT(ret == true);
+	CU_ASSERT(dif_ctx.block_size == 520);
+ CU_ASSERT(dif_ctx.md_size == 8);
+ CU_ASSERT(dif_ctx.init_ref_tag == 0x90ABCDEF);
+}
+
+static void
+test_spdk_nvmf_bdev_ctrlr_compare_and_write_cmd(void)
+{
+ int rc;
+ struct spdk_bdev bdev = {};
+ struct spdk_bdev_desc *desc = NULL;
+ struct spdk_io_channel ch = {};
+
+ struct spdk_nvmf_request cmp_req = {};
+ union nvmf_c2h_msg cmp_rsp = {};
+
+ struct spdk_nvmf_request write_req = {};
+ union nvmf_c2h_msg write_rsp = {};
+
+ struct spdk_nvmf_qpair qpair = {};
+
+ struct spdk_nvme_cmd cmp_cmd = {};
+ struct spdk_nvme_cmd write_cmd = {};
+
+ struct spdk_nvmf_ctrlr ctrlr = {};
+ struct spdk_nvmf_subsystem subsystem = {};
+ struct spdk_nvmf_ns ns = {};
+ struct spdk_nvmf_ns *subsys_ns[1] = {};
+
+ struct spdk_nvmf_poll_group group = {};
+ struct spdk_nvmf_subsystem_poll_group sgroups = {};
+ struct spdk_nvmf_subsystem_pg_ns_info ns_info = {};
+
+ bdev.blocklen = 512;
+ bdev.num_blocks = 10;
+ ns.bdev = &bdev;
+
+ subsystem.id = 0;
+ subsystem.max_nsid = 1;
+ subsys_ns[0] = &ns;
+ subsystem.ns = (struct spdk_nvmf_ns **)&subsys_ns;
+
+ /* Enable controller */
+ ctrlr.vcprop.cc.bits.en = 1;
+ ctrlr.subsys = &subsystem;
+
+ group.num_sgroups = 1;
+ sgroups.num_ns = 1;
+ sgroups.ns_info = &ns_info;
+ group.sgroups = &sgroups;
+
+ qpair.ctrlr = &ctrlr;
+ qpair.group = &group;
+
+ cmp_req.qpair = &qpair;
+ cmp_req.cmd = (union nvmf_h2c_msg *)&cmp_cmd;
+ cmp_req.rsp = &cmp_rsp;
+
+ cmp_cmd.nsid = 1;
+ cmp_cmd.fuse = SPDK_NVME_CMD_FUSE_FIRST;
+ cmp_cmd.opc = SPDK_NVME_OPC_COMPARE;
+
+ write_req.qpair = &qpair;
+ write_req.cmd = (union nvmf_h2c_msg *)&write_cmd;
+ write_req.rsp = &write_rsp;
+
+ write_cmd.nsid = 1;
+ write_cmd.fuse = SPDK_NVME_CMD_FUSE_SECOND;
+ write_cmd.opc = SPDK_NVME_OPC_WRITE;
+
+ /* 1. SUCCESS */
+ cmp_cmd.cdw10 = 1; /* SLBA: CDW10 and CDW11 */
+ cmp_cmd.cdw12 = 1; /* NLB: CDW12 bits 15:00, 0's based */
+
+ write_cmd.cdw10 = 1; /* SLBA: CDW10 and CDW11 */
+ write_cmd.cdw12 = 1; /* NLB: CDW12 bits 15:00, 0's based */
+ write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;
+
+ rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, desc, &ch, &cmp_req, &write_req);
+
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
+ CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
+ CU_ASSERT(write_rsp.nvme_cpl.status.sct == 0);
+ CU_ASSERT(write_rsp.nvme_cpl.status.sc == 0);
+
+ /* 2. Fused command start lba / num blocks mismatch */
+ cmp_cmd.cdw10 = 1; /* SLBA: CDW10 and CDW11 */
+ cmp_cmd.cdw12 = 2; /* NLB: CDW12 bits 15:00, 0's based */
+
+ write_cmd.cdw10 = 1; /* SLBA: CDW10 and CDW11 */
+ write_cmd.cdw12 = 1; /* NLB: CDW12 bits 15:00, 0's based */
+ write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;
+
+ rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, desc, &ch, &cmp_req, &write_req);
+
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
+ CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
+ CU_ASSERT(write_rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(write_rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
+
+ /* 3. SPDK_NVME_SC_LBA_OUT_OF_RANGE */
+ cmp_cmd.cdw10 = 1; /* SLBA: CDW10 and CDW11 */
+ cmp_cmd.cdw12 = 100; /* NLB: CDW12 bits 15:00, 0's based */
+
+ write_cmd.cdw10 = 1; /* SLBA: CDW10 and CDW11 */
+ write_cmd.cdw12 = 100; /* NLB: CDW12 bits 15:00, 0's based */
+ write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen;
+
+ rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, desc, &ch, &cmp_req, &write_req);
+
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
+ CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
+ CU_ASSERT(write_rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(write_rsp.nvme_cpl.status.sc == SPDK_NVME_SC_LBA_OUT_OF_RANGE);
+
+ /* 4. SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID */
+ cmp_cmd.cdw10 = 1; /* SLBA: CDW10 and CDW11 */
+ cmp_cmd.cdw12 = 1; /* NLB: CDW12 bits 15:00, 0's based */
+
+ write_cmd.cdw10 = 1; /* SLBA: CDW10 and CDW11 */
+ write_cmd.cdw12 = 1; /* NLB: CDW12 bits 15:00, 0's based */
+ write_req.length = (write_cmd.cdw12 + 1) * bdev.blocklen - 1;
+
+ rc = nvmf_bdev_ctrlr_compare_and_write_cmd(&bdev, desc, &ch, &cmp_req, &write_req);
+
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(cmp_rsp.nvme_cpl.status.sct == 0);
+ CU_ASSERT(cmp_rsp.nvme_cpl.status.sc == 0);
+ CU_ASSERT(write_rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(write_rsp.nvme_cpl.status.sc == SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvmf", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_get_rw_params);
+ CU_ADD_TEST(suite, test_lba_in_range);
+ CU_ADD_TEST(suite, test_get_dif_ctx);
+
+ CU_ADD_TEST(suite, test_spdk_nvmf_bdev_ctrlr_compare_and_write_cmd);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvmf/ctrlr_discovery.c/.gitignore b/src/spdk/test/unit/lib/nvmf/ctrlr_discovery.c/.gitignore
new file mode 100644
index 000000000..a975a97ec
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/ctrlr_discovery.c/.gitignore
@@ -0,0 +1 @@
+ctrlr_discovery_ut
diff --git a/src/spdk/test/unit/lib/nvmf/ctrlr_discovery.c/Makefile b/src/spdk/test/unit/lib/nvmf/ctrlr_discovery.c/Makefile
new file mode 100644
index 000000000..d289bc3e8
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/ctrlr_discovery.c/Makefile
@@ -0,0 +1,39 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+SPDK_LIB_LIST = json
+TEST_FILE = ctrlr_discovery_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvmf/ctrlr_discovery.c/ctrlr_discovery_ut.c b/src/spdk/test/unit/lib/nvmf/ctrlr_discovery.c/ctrlr_discovery_ut.c
new file mode 100644
index 000000000..29e923de8
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/ctrlr_discovery.c/ctrlr_discovery_ut.c
@@ -0,0 +1,303 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+#include "spdk_internal/mock.h"
+
+#include "common/lib/test_env.c"
+#include "nvmf/ctrlr_discovery.c"
+#include "nvmf/subsystem.c"
+
+SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF)
+
+DEFINE_STUB_V(spdk_bdev_module_release_bdev,
+ (struct spdk_bdev *bdev));
+
+DEFINE_STUB(spdk_bdev_get_block_size, uint32_t,
+ (const struct spdk_bdev *bdev), 512);
+
+DEFINE_STUB(spdk_nvmf_transport_stop_listen,
+ int,
+ (struct spdk_nvmf_transport *transport,
+ const struct spdk_nvme_transport_id *trid), 0);
+
+int
+spdk_bdev_open(struct spdk_bdev *bdev, bool write, spdk_bdev_remove_cb_t remove_cb,
+ void *remove_ctx, struct spdk_bdev_desc **desc)
+{
+ return 0;
+}
+
+void
+spdk_bdev_close(struct spdk_bdev_desc *desc)
+{
+}
+
+const char *
+spdk_bdev_get_name(const struct spdk_bdev *bdev)
+{
+ return "test";
+}
+
+const struct spdk_uuid *
+spdk_bdev_get_uuid(const struct spdk_bdev *bdev)
+{
+ return &bdev->uuid;
+}
+
+int
+spdk_nvmf_transport_listen(struct spdk_nvmf_transport *transport,
+ const struct spdk_nvme_transport_id *trid)
+{
+ return 0;
+}
+
+static struct spdk_nvmf_listener g_listener = {};
+
+struct spdk_nvmf_listener *
+nvmf_transport_find_listener(struct spdk_nvmf_transport *transport,
+ const struct spdk_nvme_transport_id *trid)
+{
+ return &g_listener;
+}
+
+void
+nvmf_transport_listener_discover(struct spdk_nvmf_transport *transport,
+ struct spdk_nvme_transport_id *trid,
+ struct spdk_nvmf_discovery_log_page_entry *entry)
+{
+ entry->trtype = 42;
+}
+
+struct spdk_nvmf_transport_ops g_transport_ops = {};
+
+static struct spdk_nvmf_transport g_transport = {
+ .ops = &g_transport_ops
+};
+
+struct spdk_nvmf_transport *
+spdk_nvmf_transport_create(const char *transport_name,
+ struct spdk_nvmf_transport_opts *tprt_opts)
+{
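+	/* Hand back the test transport for any name except "RDMA" (strcasecmp() is nonzero on mismatch) */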
+ if (strcasecmp(transport_name, spdk_nvme_transport_id_trtype_str(SPDK_NVME_TRANSPORT_RDMA))) {
+ return &g_transport;
+ }
+
+ return NULL;
+}
+
+struct spdk_nvmf_subsystem *
+spdk_nvmf_tgt_find_subsystem(struct spdk_nvmf_tgt *tgt, const char *subnqn)
+{
+ return NULL;
+}
+
+struct spdk_nvmf_transport *
+spdk_nvmf_tgt_get_transport(struct spdk_nvmf_tgt *tgt, const char *transport_name)
+{
+ return &g_transport;
+}
+
+int
+spdk_nvme_transport_id_parse_trtype(enum spdk_nvme_transport_type *trtype, const char *str)
+{
+ if (trtype == NULL || str == NULL) {
+ return -EINVAL;
+ }
+
+ if (strcasecmp(str, "PCIe") == 0) {
+ *trtype = SPDK_NVME_TRANSPORT_PCIE;
+ } else if (strcasecmp(str, "RDMA") == 0) {
+ *trtype = SPDK_NVME_TRANSPORT_RDMA;
+ } else {
+ return -ENOENT;
+ }
+ return 0;
+}
+
+int
+spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
+ const struct spdk_nvme_transport_id *trid2)
+{
+ return 0;
+}
+
+void
+nvmf_ctrlr_ns_changed(struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid)
+{
+}
+
+void
+nvmf_ctrlr_destruct(struct spdk_nvmf_ctrlr *ctrlr)
+{
+}
+
+int
+nvmf_poll_group_update_subsystem(struct spdk_nvmf_poll_group *group,
+ struct spdk_nvmf_subsystem *subsystem)
+{
+ return 0;
+}
+
+int
+nvmf_poll_group_add_subsystem(struct spdk_nvmf_poll_group *group,
+ struct spdk_nvmf_subsystem *subsystem,
+ spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
+{
+ return 0;
+}
+
+void
+nvmf_poll_group_remove_subsystem(struct spdk_nvmf_poll_group *group,
+ struct spdk_nvmf_subsystem *subsystem,
+ spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
+{
+}
+
+void
+nvmf_poll_group_pause_subsystem(struct spdk_nvmf_poll_group *group,
+ struct spdk_nvmf_subsystem *subsystem,
+ spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
+{
+}
+
+void
+nvmf_poll_group_resume_subsystem(struct spdk_nvmf_poll_group *group,
+ struct spdk_nvmf_subsystem *subsystem,
+ spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
+{
+}
+
+static void
+_subsystem_add_listen_done(void *cb_arg, int status)
+{
+ SPDK_CU_ASSERT_FATAL(status == 0);
+}
+
+static void
+test_discovery_log(void)
+{
+ struct spdk_nvmf_tgt tgt = {};
+ struct spdk_nvmf_subsystem *subsystem;
+ uint8_t buffer[8192];
+ struct iovec iov;
+ struct spdk_nvmf_discovery_log_page *disc_log;
+ struct spdk_nvmf_discovery_log_page_entry *entry;
+ struct spdk_nvme_transport_id trid = {};
+
+ iov.iov_base = buffer;
+ iov.iov_len = 8192;
+
+ tgt.max_subsystems = 1024;
+ tgt.subsystems = calloc(tgt.max_subsystems, sizeof(struct spdk_nvmf_subsystem *));
+ SPDK_CU_ASSERT_FATAL(tgt.subsystems != NULL);
+
+ /* Add one subsystem and verify that the discovery log contains it */
+	subsystem = spdk_nvmf_subsystem_create(&tgt, "nqn.2016-06.io.spdk:subsystem1",
+					       SPDK_NVMF_SUBTYPE_NVME, 0);
+	SPDK_CU_ASSERT_FATAL(subsystem != NULL);
+	subsystem->allow_any_host = true;
+
+ trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
+ trid.adrfam = SPDK_NVMF_ADRFAM_IPV4;
+ snprintf(trid.traddr, sizeof(trid.traddr), "1234");
+ snprintf(trid.trsvcid, sizeof(trid.trsvcid), "5678");
+ spdk_nvmf_subsystem_add_listener(subsystem, &trid, _subsystem_add_listen_done, NULL);
+ subsystem->state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
+
+ /* Get only genctr (first field in the header) */
+ memset(buffer, 0xCC, sizeof(buffer));
+ disc_log = (struct spdk_nvmf_discovery_log_page *)buffer;
+ nvmf_get_discovery_log_page(&tgt, "nqn.2016-06.io.spdk:host1", &iov, 1, 0,
+ sizeof(disc_log->genctr));
+	CU_ASSERT(disc_log->genctr == 2); /* genctr incremented once for the subsystem, once for the listener */
+
+ /* Get only the header, no entries */
+ memset(buffer, 0xCC, sizeof(buffer));
+ disc_log = (struct spdk_nvmf_discovery_log_page *)buffer;
+ nvmf_get_discovery_log_page(&tgt, "nqn.2016-06.io.spdk:host1", &iov, 1, 0, sizeof(*disc_log));
+ CU_ASSERT(disc_log->genctr == 2);
+ CU_ASSERT(disc_log->numrec == 1);
+
+ /* Offset 0, exact size match */
+ memset(buffer, 0xCC, sizeof(buffer));
+ disc_log = (struct spdk_nvmf_discovery_log_page *)buffer;
+ nvmf_get_discovery_log_page(&tgt, "nqn.2016-06.io.spdk:host1", &iov, 1, 0,
+ sizeof(*disc_log) + sizeof(disc_log->entries[0]));
+ CU_ASSERT(disc_log->genctr != 0);
+ CU_ASSERT(disc_log->numrec == 1);
+ CU_ASSERT(disc_log->entries[0].trtype == 42);
+
+ /* Offset 0, oversize buffer */
+ memset(buffer, 0xCC, sizeof(buffer));
+ disc_log = (struct spdk_nvmf_discovery_log_page *)buffer;
+ nvmf_get_discovery_log_page(&tgt, "nqn.2016-06.io.spdk:host1", &iov, 1, 0, sizeof(buffer));
+ CU_ASSERT(disc_log->genctr != 0);
+ CU_ASSERT(disc_log->numrec == 1);
+ CU_ASSERT(disc_log->entries[0].trtype == 42);
+ CU_ASSERT(spdk_mem_all_zero(buffer + sizeof(*disc_log) + sizeof(disc_log->entries[0]),
+ sizeof(buffer) - (sizeof(*disc_log) + sizeof(disc_log->entries[0]))));
+
+ /* Get just the first entry, no header */
+ memset(buffer, 0xCC, sizeof(buffer));
+ entry = (struct spdk_nvmf_discovery_log_page_entry *)buffer;
+ nvmf_get_discovery_log_page(&tgt, "nqn.2016-06.io.spdk:host1", &iov,
+ 1,
+ offsetof(struct spdk_nvmf_discovery_log_page, entries[0]),
+ sizeof(*entry));
+ CU_ASSERT(entry->trtype == 42);
+ subsystem->state = SPDK_NVMF_SUBSYSTEM_INACTIVE;
+ spdk_nvmf_subsystem_destroy(subsystem);
+ free(tgt.subsystems);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvmf", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_discovery_log);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvmf/fc.c/.gitignore b/src/spdk/test/unit/lib/nvmf/fc.c/.gitignore
new file mode 100644
index 000000000..3895b84ab
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/fc.c/.gitignore
@@ -0,0 +1 @@
+fc_ut
diff --git a/src/spdk/test/unit/lib/nvmf/fc.c/Makefile b/src/spdk/test/unit/lib/nvmf/fc.c/Makefile
new file mode 100644
index 000000000..7f54f1520
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/fc.c/Makefile
@@ -0,0 +1,58 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) 2018 Broadcom. All Rights Reserved.
+# The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../../)
+include $(SPDK_ROOT_DIR)/mk/config.mk
+
+CFLAGS += -I$(SPDK_ROOT_DIR)/test/common/lib -I$(SPDK_ROOT_DIR)/lib \
+-I$(SPDK_ROOT_DIR)/lib/nvmf
+
+ifneq ($(strip $(CONFIG_FC_PATH)),)
+CFLAGS += -I$(CONFIG_FC_PATH)
+endif
+
+TEST_FILE = fc_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
+
+# Disable clang warning: taking address of packed member of class or structure may result in an unaligned pointer value [-Werror,-Waddress-of-packed-member]
+ifeq ($(CC),clang)
+ CLANG_VERSION := $(shell $(CC) -v 2>&1 | \
+ sed -n "s/.*version \([0-9]*\.[0-9]*\).*/\1/p")
+
+CLANG_MAJOR_VERSION := $(shell echo $(CLANG_VERSION) | cut -f1 -d.)
+
+ifeq ($(shell test $(CLANG_MAJOR_VERSION) -ge 4 && echo 1), 1)
+ CFLAGS += -Wno-address-of-packed-member
+endif
+endif
diff --git a/src/spdk/test/unit/lib/nvmf/fc.c/fc_ut.c b/src/spdk/test/unit/lib/nvmf/fc.c/fc_ut.c
new file mode 100644
index 000000000..a8d4b3b96
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/fc.c/fc_ut.c
@@ -0,0 +1,505 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (c) 2018-2019 Broadcom. All Rights Reserved.
+ * The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* NVMF FC Transport Unit Test */
+
+#include "spdk/env.h"
+#include "spdk_cunit.h"
+#include "spdk/nvmf.h"
+#include "spdk/endian.h"
+#include "spdk/trace.h"
+#include "spdk_internal/log.h"
+
+#include "ut_multithread.c"
+
+#include "transport.h"
+#include "nvmf_internal.h"
+
+#include "nvmf_fc.h"
+
+#include "json/json_util.c"
+#include "json/json_write.c"
+#include "nvmf/nvmf.c"
+#include "nvmf/transport.c"
+#include "nvmf/subsystem.c"
+#include "nvmf/fc.c"
+#include "nvmf/fc_ls.c"
+
+/*
+ * Stubs and test doubles standing in for SPDK dependencies
+ */
+
+#ifdef SPDK_CONFIG_RDMA
+const struct spdk_nvmf_transport_ops spdk_nvmf_transport_rdma = {
+ .type = SPDK_NVME_TRANSPORT_RDMA,
+ .opts_init = NULL,
+ .create = NULL,
+ .destroy = NULL,
+
+ .listen = NULL,
+ .stop_listen = NULL,
+ .accept = NULL,
+
+ .listener_discover = NULL,
+
+ .poll_group_create = NULL,
+ .poll_group_destroy = NULL,
+ .poll_group_add = NULL,
+ .poll_group_poll = NULL,
+
+ .req_free = NULL,
+ .req_complete = NULL,
+
+ .qpair_fini = NULL,
+ .qpair_get_peer_trid = NULL,
+ .qpair_get_local_trid = NULL,
+ .qpair_get_listen_trid = NULL,
+};
+#endif
+
+const struct spdk_nvmf_transport_ops spdk_nvmf_transport_tcp = {
+ .type = SPDK_NVME_TRANSPORT_TCP,
+};
+
+struct spdk_trace_histories *g_trace_histories;
+
+DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
+ uint32_t size, uint64_t object_id, uint64_t arg1));
+DEFINE_STUB(spdk_nvme_transport_id_compare, int,
+ (const struct spdk_nvme_transport_id *trid1,
+ const struct spdk_nvme_transport_id *trid2), 0);
+DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
+DEFINE_STUB_V(spdk_trace_register_description,
+ (const char *name, uint16_t tpoint_id, uint8_t owner_type,
+ uint8_t object_type, uint8_t new_object, uint8_t arg1_type,
+ const char *arg1_name));
+DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
+DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), "fc_ut_test");
+DEFINE_STUB_V(nvmf_ctrlr_destruct, (struct spdk_nvmf_ctrlr *ctrlr));
+DEFINE_STUB_V(nvmf_qpair_free_aer, (struct spdk_nvmf_qpair *qpair));
+DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc),
+ NULL);
+DEFINE_STUB_V(spdk_nvmf_request_exec, (struct spdk_nvmf_request *req));
+DEFINE_STUB_V(nvmf_ctrlr_ns_changed, (struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid));
+DEFINE_STUB(spdk_bdev_open, int, (struct spdk_bdev *bdev, bool write,
+ spdk_bdev_remove_cb_t remove_cb,
+ void *remove_ctx, struct spdk_bdev_desc **desc), 0);
+DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
+DEFINE_STUB(spdk_bdev_module_claim_bdev, int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
+ struct spdk_bdev_module *module), 0);
+DEFINE_STUB_V(spdk_bdev_module_release_bdev, (struct spdk_bdev *bdev));
+DEFINE_STUB(spdk_bdev_get_block_size, uint32_t, (const struct spdk_bdev *bdev), 512);
+DEFINE_STUB(spdk_bdev_get_num_blocks, uint64_t, (const struct spdk_bdev *bdev), 1024);
+
+DEFINE_STUB(nvmf_ctrlr_async_event_ns_notice, int, (struct spdk_nvmf_ctrlr *ctrlr), 0);
+DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
+ enum spdk_nvme_transport_type trtype));
+DEFINE_STUB_V(spdk_nvmf_ctrlr_data_init, (struct spdk_nvmf_transport_opts *opts,
+ struct spdk_nvmf_ctrlr_data *cdata));
+DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req),
+ -ENOSPC);
+
+const char *
+spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
+{
+ switch (trtype) {
+ case SPDK_NVME_TRANSPORT_PCIE:
+ return "PCIe";
+ case SPDK_NVME_TRANSPORT_RDMA:
+ return "RDMA";
+ case SPDK_NVME_TRANSPORT_FC:
+ return "FC";
+ default:
+ return NULL;
+ }
+}
+
+const char *
+spdk_nvme_transport_id_adrfam_str(enum spdk_nvmf_adrfam adrfam)
+{
+ switch (adrfam) {
+ case SPDK_NVMF_ADRFAM_IPV4:
+ return "IPv4";
+ case SPDK_NVMF_ADRFAM_IPV6:
+ return "IPv6";
+ case SPDK_NVMF_ADRFAM_IB:
+ return "IB";
+ case SPDK_NVMF_ADRFAM_FC:
+ return "FC";
+ default:
+ return NULL;
+ }
+}
+
+const struct spdk_uuid *
+spdk_bdev_get_uuid(const struct spdk_bdev *bdev)
+{
+ return &bdev->uuid;
+}
+
+static bool g_lld_init_called = false;
+
+int
+nvmf_fc_lld_init(void)
+{
+ g_lld_init_called = true;
+ return 0;
+}
+
+static bool g_lld_fini_called = false;
+
+void
+nvmf_fc_lld_fini(void)
+{
+ g_lld_fini_called = true;
+}
+
+DEFINE_STUB_V(nvmf_fc_lld_start, (void));
+DEFINE_STUB(nvmf_fc_init_q, int, (struct spdk_nvmf_fc_hwqp *hwqp), 0);
+DEFINE_STUB_V(nvmf_fc_reinit_q, (void *queues_prev, void *queues_curr));
+DEFINE_STUB(nvmf_fc_init_rqpair_buffers, int, (struct spdk_nvmf_fc_hwqp *hwqp), 0);
+DEFINE_STUB(nvmf_fc_set_q_online_state, int, (struct spdk_nvmf_fc_hwqp *hwqp, bool online), 0);
+DEFINE_STUB(nvmf_fc_put_xchg, int, (struct spdk_nvmf_fc_hwqp *hwqp, struct spdk_nvmf_fc_xchg *xri),
+ 0);
+DEFINE_STUB(nvmf_fc_recv_data, int, (struct spdk_nvmf_fc_request *fc_req), 0);
+DEFINE_STUB(nvmf_fc_send_data, int, (struct spdk_nvmf_fc_request *fc_req), 0);
+DEFINE_STUB_V(nvmf_fc_rqpair_buffer_release, (struct spdk_nvmf_fc_hwqp *hwqp, uint16_t buff_idx));
+DEFINE_STUB(nvmf_fc_xmt_rsp, int, (struct spdk_nvmf_fc_request *fc_req, uint8_t *ersp_buf,
+ uint32_t ersp_len), 0);
+DEFINE_STUB(nvmf_fc_xmt_ls_rsp, int, (struct spdk_nvmf_fc_nport *tgtport,
+ struct spdk_nvmf_fc_ls_rqst *ls_rqst), 0);
+DEFINE_STUB(nvmf_fc_issue_abort, int, (struct spdk_nvmf_fc_hwqp *hwqp,
+ struct spdk_nvmf_fc_xchg *xri,
+ spdk_nvmf_fc_caller_cb cb, void *cb_args), 0);
+DEFINE_STUB(nvmf_fc_xmt_bls_rsp, int, (struct spdk_nvmf_fc_hwqp *hwqp,
+ uint16_t ox_id, uint16_t rx_id,
+ uint16_t rpi, bool rjt, uint8_t rjt_exp,
+ spdk_nvmf_fc_caller_cb cb, void *cb_args), 0);
+DEFINE_STUB(nvmf_fc_alloc_srsr_bufs, struct spdk_nvmf_fc_srsr_bufs *, (size_t rqst_len,
+ size_t rsp_len), NULL);
+DEFINE_STUB_V(nvmf_fc_free_srsr_bufs, (struct spdk_nvmf_fc_srsr_bufs *srsr_bufs));
+DEFINE_STUB(nvmf_fc_xmt_srsr_req, int, (struct spdk_nvmf_fc_hwqp *hwqp,
+ struct spdk_nvmf_fc_srsr_bufs *xmt_srsr_bufs,
+ spdk_nvmf_fc_caller_cb cb, void *cb_args), 0);
+DEFINE_STUB(nvmf_fc_q_sync_available, bool, (void), true);
+DEFINE_STUB(nvmf_fc_issue_q_sync, int, (struct spdk_nvmf_fc_hwqp *hwqp, uint64_t u_id,
+ uint16_t skip_rq), 0);
+DEFINE_STUB(nvmf_fc_assign_conn_to_hwqp, bool, (struct spdk_nvmf_fc_hwqp *hwqp,
+ uint64_t *conn_id, uint32_t sq_size), true);
+DEFINE_STUB(nvmf_fc_get_hwqp_from_conn_id, struct spdk_nvmf_fc_hwqp *,
+ (struct spdk_nvmf_fc_hwqp *queues,
+ uint32_t num_queues, uint64_t conn_id), NULL);
+DEFINE_STUB_V(nvmf_fc_release_conn, (struct spdk_nvmf_fc_hwqp *hwqp, uint64_t conn_id,
+ uint32_t sq_size));
+DEFINE_STUB_V(nvmf_fc_dump_all_queues, (struct spdk_nvmf_fc_hwqp *ls_queue,
+ struct spdk_nvmf_fc_hwqp *io_queues,
+ uint32_t num_io_queues,
+ struct spdk_nvmf_fc_queue_dump_info *dump_info));
+DEFINE_STUB_V(nvmf_fc_get_xri_info, (struct spdk_nvmf_fc_hwqp *hwqp,
+ struct spdk_nvmf_fc_xchg_info *info));
+DEFINE_STUB(nvmf_fc_get_rsvd_thread, struct spdk_thread *, (void), NULL);
+
+uint32_t
+nvmf_fc_process_queue(struct spdk_nvmf_fc_hwqp *hwqp)
+{
+ hwqp->lcore_id++;
+ return 0; /* always return 0 or else it will poll forever */
+}
+
+struct spdk_nvmf_fc_xchg *
+nvmf_fc_get_xri(struct spdk_nvmf_fc_hwqp *hwqp)
+{
+ static struct spdk_nvmf_fc_xchg xchg;
+
+ xchg.xchg_id = 1;
+ return &xchg;
+}
+
+#define MAX_FC_UT_POLL_THREADS 8
+static struct spdk_nvmf_poll_group *g_poll_groups[MAX_FC_UT_POLL_THREADS] = {0};
+#define MAX_FC_UT_HWQPS MAX_FC_UT_POLL_THREADS
+static struct spdk_nvmf_tgt *g_nvmf_tgt = NULL;
+static struct spdk_nvmf_transport *g_nvmf_tprt = NULL;
+uint8_t g_fc_port_handle = 0xff;
+struct spdk_nvmf_fc_hwqp lld_q[MAX_FC_UT_HWQPS];
+
+static void
+_add_transport_done(void *arg, int status)
+{
+ CU_ASSERT(status == 0);
+}
+
+static void
+_add_transport_done_dup_err(void *arg, int status)
+{
+ CU_ASSERT(status == -EEXIST);
+}
+
+static void
+create_transport_test(void)
+{
+ const struct spdk_nvmf_transport_ops *ops = NULL;
+ struct spdk_nvmf_transport_opts opts = { 0 };
+ struct spdk_nvmf_target_opts tgt_opts = {
+ .name = "nvmf_test_tgt",
+ .max_subsystems = 0
+ };
+
+ allocate_threads(8);
+ set_thread(0);
+
+ g_nvmf_tgt = spdk_nvmf_tgt_create(&tgt_opts);
+ SPDK_CU_ASSERT_FATAL(g_nvmf_tgt != NULL);
+
+ ops = nvmf_get_transport_ops(SPDK_NVME_TRANSPORT_NAME_FC);
+ SPDK_CU_ASSERT_FATAL(ops != NULL);
+
+ ops->opts_init(&opts);
+
+ g_lld_init_called = false;
+ g_nvmf_tprt = spdk_nvmf_transport_create("FC", &opts);
+ SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);
+
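+	/* Transport creation must have initialized the low-level driver and honored our opts */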
+ CU_ASSERT(g_lld_init_called == true);
+ CU_ASSERT(opts.max_queue_depth == g_nvmf_tprt->opts.max_queue_depth);
+ CU_ASSERT(opts.max_qpairs_per_ctrlr == g_nvmf_tprt->opts.max_qpairs_per_ctrlr);
+ CU_ASSERT(opts.in_capsule_data_size == g_nvmf_tprt->opts.in_capsule_data_size);
+ CU_ASSERT(opts.max_io_size == g_nvmf_tprt->opts.max_io_size);
+ CU_ASSERT(opts.io_unit_size == g_nvmf_tprt->opts.io_unit_size);
+ CU_ASSERT(opts.max_aq_depth == g_nvmf_tprt->opts.max_aq_depth);
+
+ set_thread(0);
+
+ spdk_nvmf_tgt_add_transport(g_nvmf_tgt, g_nvmf_tprt,
+				    _add_transport_done, NULL);
+ poll_thread(0);
+
+ /* Add transport again - should get error */
+ spdk_nvmf_tgt_add_transport(g_nvmf_tgt, g_nvmf_tprt,
+				    _add_transport_done_dup_err, NULL);
+ poll_thread(0);
+
+ /* create transport with bad args/options */
+#ifndef SPDK_CONFIG_RDMA
+ CU_ASSERT(spdk_nvmf_transport_create("RDMA", &opts) == NULL);
+#endif
+ CU_ASSERT(spdk_nvmf_transport_create("Bogus Transport", &opts) == NULL);
+	opts.max_io_size = 1024 ^ 3; /* '^' is XOR, not exponentiation: yields 1027, an invalid (non-power-of-two) size */
+ CU_ASSERT(spdk_nvmf_transport_create("FC", &opts) == NULL);
+ opts.max_io_size = 999;
+ opts.io_unit_size = 1024;
+ CU_ASSERT(spdk_nvmf_transport_create("FC", &opts) == NULL);
+}
+
+static void
+port_init_cb(uint8_t port_handle, enum spdk_fc_event event_type, void *arg, int err)
+{
+ CU_ASSERT(err == 0);
+ CU_ASSERT(port_handle == 2);
+ g_fc_port_handle = port_handle;
+}
+
+static void
+create_fc_port_test(void)
+{
+ struct spdk_nvmf_fc_hw_port_init_args init_args = { 0 };
+ struct spdk_nvmf_fc_port *fc_port = NULL;
+ int err;
+
+ SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);
+
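+	/* port_init_cb asserts that the completed handle matches this value */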
+ init_args.port_handle = 2;
+ init_args.io_queue_cnt = spdk_min(MAX_FC_UT_HWQPS, spdk_env_get_core_count());
+ init_args.ls_queue_size = 100;
+ init_args.io_queue_size = 100;
+ init_args.io_queues = (void *)lld_q;
+
+ set_thread(0);
+ err = nvmf_fc_master_enqueue_event(SPDK_FC_HW_PORT_INIT, (void *)&init_args, port_init_cb);
+ CU_ASSERT(err == 0);
+ poll_thread(0);
+
+ fc_port = nvmf_fc_port_lookup(g_fc_port_handle);
+ CU_ASSERT(fc_port != NULL);
+}
+
+static void
+online_fc_port_test(void)
+{
+ struct spdk_nvmf_fc_port *fc_port;
+ struct spdk_nvmf_fc_hw_port_online_args args;
+ int err;
+
+ SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);
+
+ fc_port = nvmf_fc_port_lookup(g_fc_port_handle);
+ SPDK_CU_ASSERT_FATAL(fc_port != NULL);
+
+ set_thread(0);
+ args.port_handle = g_fc_port_handle;
+ err = nvmf_fc_master_enqueue_event(SPDK_FC_HW_PORT_ONLINE, (void *)&args, port_init_cb);
+ CU_ASSERT(err == 0);
+ poll_threads();
+ set_thread(0);
+ if (err == 0) {
+ uint32_t i;
+		/* After ONLINE, every hwqp should be bound to a poll group */
+		for (i = 0; i < fc_port->num_io_queues; i++) {
+			CU_ASSERT(fc_port->io_queues[i].fgroup != NULL);
+			CU_ASSERT(fc_port->io_queues[i].fgroup->hwqp_count != 0);
+		}
+ }
+ }
+}
+
+static void
+create_poll_groups_test(void)
+{
+ unsigned i;
+
+ SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);
+
+ for (i = 0; i < MAX_FC_UT_POLL_THREADS; i++) {
+ set_thread(i);
+ g_poll_groups[i] = spdk_nvmf_poll_group_create(g_nvmf_tgt);
+ poll_thread(i);
+ CU_ASSERT(g_poll_groups[i] != NULL);
+ }
+ set_thread(0);
+}
+
+static void
+poll_group_poll_test(void)
+{
+ unsigned i;
+ unsigned poll_cnt = 10;
+ struct spdk_nvmf_fc_port *fc_port = NULL;
+
+ SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);
+
+ set_thread(0);
+ fc_port = nvmf_fc_port_lookup(g_fc_port_handle);
+ SPDK_CU_ASSERT_FATAL(fc_port != NULL);
+
+ for (i = 0; i < fc_port->num_io_queues; i++) {
+ fc_port->io_queues[i].lcore_id = 0;
+ }
+
+ for (i = 0; i < poll_cnt; i++) {
+		/* this should cause spdk_nvmf_fc_poll_group_poll to be called */
+ poll_threads();
+ }
+
+ /* check if hwqp's lcore_id has been updated */
+ for (i = 0; i < fc_port->num_io_queues; i++) {
+ CU_ASSERT(fc_port->io_queues[i].lcore_id == poll_cnt);
+ }
+}
+
+static void
+remove_hwqps_from_poll_groups_test(void)
+{
+ unsigned i;
+ struct spdk_nvmf_fc_port *fc_port = NULL;
+
+ SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);
+
+ fc_port = nvmf_fc_port_lookup(g_fc_port_handle);
+ SPDK_CU_ASSERT_FATAL(fc_port != NULL);
+
+ for (i = 0; i < fc_port->num_io_queues; i++) {
+ nvmf_fc_poll_group_remove_hwqp(&fc_port->io_queues[i]);
+ poll_threads();
+		CU_ASSERT(fc_port->io_queues[i].fgroup == NULL);
+ }
+}
+
+static void
+destroy_transport_test(void)
+{
+ unsigned i;
+
+ set_thread(0);
+ SPDK_CU_ASSERT_FATAL(g_nvmf_tprt != NULL);
+
+ for (i = 0; i < MAX_FC_UT_POLL_THREADS; i++) {
+ set_thread(i);
+ spdk_nvmf_poll_group_destroy(g_poll_groups[i], NULL, NULL);
+ poll_thread(0);
+ }
+
+ SPDK_CU_ASSERT_FATAL(g_nvmf_tgt != NULL);
+ g_lld_fini_called = false;
+ spdk_nvmf_tgt_destroy(g_nvmf_tgt, NULL, NULL);
+ poll_threads();
+ CU_ASSERT(g_lld_fini_called == true);
+}
+
+static int
+nvmf_fc_tests_init(void)
+{
+ return 0;
+}
+
+static int
+nvmf_fc_tests_fini(void)
+{
+ free_threads();
+ return 0;
+}
+
+int main(int argc, char **argv)
+{
+ unsigned int num_failures = 0;
+ CU_pSuite suite = NULL;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("NVMf-FC", nvmf_fc_tests_init, nvmf_fc_tests_fini);
+
+ CU_ADD_TEST(suite, create_transport_test);
+ CU_ADD_TEST(suite, create_poll_groups_test);
+ CU_ADD_TEST(suite, create_fc_port_test);
+ CU_ADD_TEST(suite, online_fc_port_test);
+ CU_ADD_TEST(suite, poll_group_poll_test);
+ CU_ADD_TEST(suite, remove_hwqps_from_poll_groups_test);
+ CU_ADD_TEST(suite, destroy_transport_test);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvmf/fc_ls.c/.gitignore b/src/spdk/test/unit/lib/nvmf/fc_ls.c/.gitignore
new file mode 100644
index 000000000..ac5b0c40e
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/fc_ls.c/.gitignore
@@ -0,0 +1 @@
+fc_ls_ut
diff --git a/src/spdk/test/unit/lib/nvmf/fc_ls.c/Makefile b/src/spdk/test/unit/lib/nvmf/fc_ls.c/Makefile
new file mode 100644
index 000000000..d9143e627
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/fc_ls.c/Makefile
@@ -0,0 +1,45 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) 2018 Broadcom. All Rights Reserved.
+# The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../../)
+include $(SPDK_ROOT_DIR)/mk/config.mk
+
+CFLAGS += -I$(SPDK_ROOT_DIR)/test/common/lib -I$(SPDK_ROOT_DIR)/lib/nvmf
+
+ifneq ($(strip $(CONFIG_FC_PATH)),)
+CFLAGS += -I$(CONFIG_FC_PATH)
+endif
+
+TEST_FILE = fc_ls_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvmf/fc_ls.c/fc_ls_ut.c b/src/spdk/test/unit/lib/nvmf/fc_ls.c/fc_ls_ut.c
new file mode 100644
index 000000000..68eb81960
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/fc_ls.c/fc_ls_ut.c
@@ -0,0 +1,1070 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (c) 2018-2019 Broadcom. All Rights Reserved.
+ * The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* NVMF FC LS Command Processor Unit Test */
+
+#include "spdk/env.h"
+#include "spdk_cunit.h"
+#include "spdk/nvmf.h"
+#include "spdk/endian.h"
+#include "spdk/trace.h"
+#include "spdk_internal/log.h"
+
+#include "ut_multithread.c"
+
+#include "transport.h"
+#include "nvmf_internal.h"
+#include "nvmf_fc.h"
+
+#include "fc_ls.c"
+
+#define LAST_RSLT_STOP_TEST 999
+
+void spdk_set_thread(struct spdk_thread *thread);
+
+/*
+ * SPDK Stuff
+ */
+
+DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req), -ENOSPC);
+DEFINE_STUB(spdk_nvmf_subsystem_host_allowed, bool,
+ (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn), true);
+DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
+ enum spdk_nvme_transport_type trtype));
+
+static const char *fc_ut_subsystem_nqn =
+ "nqn.2017-11.io.spdk:sn.390c0dc7c87011e786b300a0989adc53:subsystem.good";
+static struct spdk_nvmf_host fc_ut_initiator = {
+ .nqn = "nqn.2017-11.fc_host",
+};
+static struct spdk_nvmf_host *fc_ut_host = &fc_ut_initiator;
+static struct spdk_nvmf_tgt g_nvmf_tgt;
+static struct spdk_nvmf_transport_opts g_nvmf_transport_opts = {
+ .max_queue_depth = 128,
+ .max_qpairs_per_ctrlr = 4,
+ .max_aq_depth = 32,
+};
+static uint32_t g_hw_queue_depth = 1024;
+static struct spdk_nvmf_subsystem g_nvmf_subsystem;
+
+void nvmf_fc_request_abort(struct spdk_nvmf_fc_request *fc_req, bool send_abts,
+ spdk_nvmf_fc_caller_cb cb, void *cb_args);
+void spdk_bdev_io_abort(struct spdk_bdev_io *bdev_io, void *ctx);
+void nvmf_fc_request_abort_complete(void *arg1);
+bool nvmf_fc_req_in_xfer(struct spdk_nvmf_fc_request *fc_req);
+
+struct spdk_nvmf_subsystem *
+spdk_nvmf_tgt_find_subsystem(struct spdk_nvmf_tgt *tgt, const char *subnqn)
+{
+ if (!strcmp(subnqn, g_nvmf_subsystem.subnqn)) {
+ return &g_nvmf_subsystem;
+ }
+ return NULL;
+}
+
+int
+spdk_nvmf_poll_group_add(struct spdk_nvmf_poll_group *group,
+ struct spdk_nvmf_qpair *qpair)
+{
+ qpair->state = SPDK_NVMF_QPAIR_ACTIVE;
+ return 0;
+}
+
+const struct spdk_nvmf_transport_ops spdk_nvmf_transport_fc = {
+ .type = (enum spdk_nvme_transport_type) SPDK_NVMF_TRTYPE_FC,
+ .create = NULL,
+ .destroy = NULL,
+
+ .listen = NULL,
+ .stop_listen = NULL,
+ .accept = NULL,
+
+ .listener_discover = NULL,
+
+ .poll_group_create = NULL,
+ .poll_group_destroy = NULL,
+ .poll_group_add = NULL,
+ .poll_group_poll = NULL,
+
+ .req_complete = NULL,
+
+ .qpair_fini = NULL,
+};
+
+struct spdk_nvmf_transport g_nvmf_transport = {
+ .ops = &spdk_nvmf_transport_fc,
+ .tgt = &g_nvmf_tgt,
+};
+
+struct spdk_nvmf_transport *
+spdk_nvmf_tgt_get_transport(struct spdk_nvmf_tgt *tgt, const char *transport_name)
+{
+ return &g_nvmf_transport;
+}
+
+int
+spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
+{
+ cb_fn(ctx);
+ return 0;
+}
+
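+/*
+ * UT replacement for the new-qpair path: assign the FC connection to the
+ * least-loaded hwqp (the one with the largest remaining rq_size) and hand it
+ * to the poller via the ADD_CONNECTION API call.
+ */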
+void
+spdk_nvmf_tgt_new_qpair(struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair)
+{
+ uint32_t i;
+ struct spdk_nvmf_fc_conn *fc_conn;
+ struct spdk_nvmf_fc_hwqp *hwqp = NULL, *sel_hwqp = NULL;
+ struct spdk_nvmf_fc_ls_add_conn_api_data *api_data = NULL;
+ struct spdk_nvmf_fc_port *fc_port;
+
+ fc_conn = SPDK_CONTAINEROF(qpair, struct spdk_nvmf_fc_conn, qpair);
+ api_data = &fc_conn->create_opd->u.add_conn;
+
+ /* Pick a hwqp with least load */
+ fc_port = fc_conn->fc_assoc->tgtport->fc_port;
+ for (i = 0; i < fc_port->num_io_queues; i ++) {
+ hwqp = &fc_port->io_queues[i];
+ if (!sel_hwqp || (hwqp->rq_size > sel_hwqp->rq_size)) {
+ sel_hwqp = hwqp;
+ }
+ }
+
+ if (!nvmf_fc_assign_conn_to_hwqp(sel_hwqp,
+ &fc_conn->conn_id,
+ fc_conn->max_queue_depth)) {
+ goto err;
+ }
+
+ fc_conn->hwqp = sel_hwqp;
+
+ /* If this is for ADMIN connection, then update assoc ID. */
+ if (fc_conn->qpair.qid == 0) {
+ fc_conn->fc_assoc->assoc_id = fc_conn->conn_id;
+ }
+
+ nvmf_fc_poller_api_func(sel_hwqp, SPDK_NVMF_FC_POLLER_API_ADD_CONNECTION, &api_data->args);
+
+ return;
+err:
+ nvmf_fc_ls_add_conn_failure(api_data->assoc, api_data->ls_rqst,
+ api_data->args.fc_conn, api_data->aq_conn);
+}
+
+struct spdk_nvmf_fc_conn *
+nvmf_fc_hwqp_find_fc_conn(struct spdk_nvmf_fc_hwqp *hwqp, uint64_t conn_id)
+{
+ struct spdk_nvmf_fc_conn *fc_conn;
+
+ TAILQ_FOREACH(fc_conn, &hwqp->connection_list, link) {
+ if (fc_conn->conn_id == conn_id) {
+ return fc_conn;
+ }
+ }
+
+ return NULL;
+}
+
+/*
+ * LLD functions
+ */
+
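+/*
+ * Encode a connection ID with the queue number in the low byte and a
+ * monotonically increasing counter in the upper bits;
+ * nvmf_fc_get_hwqp_from_conn_id() below relies on the low byte to map a
+ * conn_id back to its hwqp.
+ */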
+static inline uint64_t
+nvmf_fc_gen_conn_id(uint32_t qnum, struct spdk_nvmf_fc_hwqp *hwqp)
+{
+ static uint16_t conn_cnt = 0;
+ return ((uint64_t) qnum | (conn_cnt++ << 8));
+}
+
+bool
+nvmf_fc_assign_conn_to_hwqp(struct spdk_nvmf_fc_hwqp *hwqp,
+ uint64_t *conn_id, uint32_t sq_size)
+{
+ SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_LS, "Assign connection to HWQP\n");
+
+ if (hwqp->rq_size < sq_size) {
+ return false; /* queue has no space for this connection */
+ }
+
+ hwqp->rq_size -= sq_size;
+ hwqp->num_conns++;
+
+ /* create connection ID */
+ *conn_id = nvmf_fc_gen_conn_id(hwqp->hwqp_id, hwqp);
+
+ SPDK_DEBUGLOG(SPDK_LOG_NVMF_FC_LS,
+ "New connection assigned to HWQP%d (free %d), conn_id 0x%lx\n",
+ hwqp->hwqp_id, hwqp->rq_size, *conn_id);
+ return true;
+}
+
+struct spdk_nvmf_fc_hwqp *
+nvmf_fc_get_hwqp_from_conn_id(struct spdk_nvmf_fc_hwqp *queues,
+ uint32_t num_queues, uint64_t conn_id)
+{
+ return &queues[(conn_id & 0xff) % num_queues];
+}
+
+void
+nvmf_fc_release_conn(struct spdk_nvmf_fc_hwqp *hwqp, uint64_t conn_id,
+ uint32_t sq_size)
+{
+ hwqp->rq_size += sq_size;
+}
+
+struct spdk_nvmf_fc_srsr_bufs *
+nvmf_fc_alloc_srsr_bufs(size_t rqst_len, size_t rsp_len)
+{
+ struct spdk_nvmf_fc_srsr_bufs *srsr_bufs;
+
+ srsr_bufs = calloc(1, sizeof(struct spdk_nvmf_fc_srsr_bufs));
+ if (!srsr_bufs) {
+ return NULL;
+ }
+
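+	/* One allocation backs both buffers: rsp points rqst_len bytes into rqst, so only rqst is freed. */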
+ srsr_bufs->rqst = calloc(1, rqst_len + rsp_len);
+ if (srsr_bufs->rqst) {
+ srsr_bufs->rqst_len = rqst_len;
+ srsr_bufs->rsp = srsr_bufs->rqst + rqst_len;
+ srsr_bufs->rsp_len = rsp_len;
+ } else {
+ free(srsr_bufs);
+ srsr_bufs = NULL;
+ }
+
+ return srsr_bufs;
+}
+
+void
+nvmf_fc_free_srsr_bufs(struct spdk_nvmf_fc_srsr_bufs *srsr_bufs)
+{
+ if (srsr_bufs) {
+ free(srsr_bufs->rqst);
+ free(srsr_bufs);
+ }
+}
+
+/*
+ * The Tests
+ */
+
+enum _test_run_type {
+ TEST_RUN_TYPE_CREATE_ASSOC = 1,
+ TEST_RUN_TYPE_CREATE_CONN,
+ TEST_RUN_TYPE_DISCONNECT,
+ TEST_RUN_TYPE_CONN_BAD_ASSOC,
+ TEST_RUN_TYPE_FAIL_LS_RSP,
+ TEST_RUN_TYPE_DISCONNECT_BAD_ASSOC,
+ TEST_RUN_TYPE_CREATE_MAX_ASSOC,
+};
+
+static uint32_t g_test_run_type = 0;
+static uint64_t g_curr_assoc_id = 0;
+static uint16_t g_create_conn_test_cnt = 0;
+static uint16_t g_max_assoc_conn_test = 0;
+static int g_last_rslt = 0;
+static bool g_spdk_nvmf_fc_xmt_srsr_req = false;
+static struct spdk_nvmf_fc_remote_port_info g_rem_port;
+
+static void
+run_create_assoc_test(const char *subnqn,
+ struct spdk_nvmf_host *host,
+ struct spdk_nvmf_fc_nport *tgt_port)
+{
+ struct spdk_nvmf_fc_ls_rqst ls_rqst;
+ struct spdk_nvmf_fc_ls_cr_assoc_rqst ca_rqst;
+ uint8_t respbuf[128];
+
+ memset(&ca_rqst, 0, sizeof(struct spdk_nvmf_fc_ls_cr_assoc_rqst));
+
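+	/*
+	 * Per the FC-NVMe LS encoding, desc_list_len and desc_len count the bytes
+	 * after the first two 32-bit words (w0 and the length field itself),
+	 * hence the "- (2 * sizeof(uint32_t))" adjustments below.
+	 */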
+ ca_rqst.w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION;
+ to_be32(&ca_rqst.desc_list_len,
+ sizeof(struct spdk_nvmf_fc_ls_cr_assoc_rqst) -
+ (2 * sizeof(uint32_t)));
+ to_be32(&ca_rqst.assoc_cmd.desc_tag, FCNVME_LSDESC_CREATE_ASSOC_CMD);
+ to_be32(&ca_rqst.assoc_cmd.desc_len,
+ sizeof(struct spdk_nvmf_fc_lsdesc_cr_assoc_cmd) -
+ (2 * sizeof(uint32_t)));
+ to_be16(&ca_rqst.assoc_cmd.ersp_ratio, (g_nvmf_transport.opts.max_aq_depth / 2));
+ to_be16(&ca_rqst.assoc_cmd.sqsize, g_nvmf_transport.opts.max_aq_depth - 1);
+ snprintf(&ca_rqst.assoc_cmd.subnqn[0], strlen(subnqn) + 1, "%s", subnqn);
+ snprintf(&ca_rqst.assoc_cmd.hostnqn[0], strlen(host->nqn) + 1, "%s", host->nqn);
+ ls_rqst.rqstbuf.virt = &ca_rqst;
+ ls_rqst.rspbuf.virt = respbuf;
+ ls_rqst.rqst_len = sizeof(struct spdk_nvmf_fc_ls_cr_assoc_rqst);
+ ls_rqst.rsp_len = 0;
+ ls_rqst.rpi = 5000;
+ ls_rqst.private_data = NULL;
+ ls_rqst.s_id = 0;
+ ls_rqst.nport = tgt_port;
+ ls_rqst.rport = &g_rem_port;
+ ls_rqst.nvmf_tgt = &g_nvmf_tgt;
+
+ nvmf_fc_handle_ls_rqst(&ls_rqst);
+ poll_thread(0);
+}
+
+static void
+run_create_conn_test(struct spdk_nvmf_host *host,
+ struct spdk_nvmf_fc_nport *tgt_port,
+ uint64_t assoc_id,
+ uint16_t qid)
+{
+ struct spdk_nvmf_fc_ls_rqst ls_rqst;
+ struct spdk_nvmf_fc_ls_cr_conn_rqst cc_rqst;
+ uint8_t respbuf[128];
+
+ memset(&cc_rqst, 0, sizeof(struct spdk_nvmf_fc_ls_cr_conn_rqst));
+
+ /* fill in request descriptor */
+ cc_rqst.w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION;
+ to_be32(&cc_rqst.desc_list_len,
+ sizeof(struct spdk_nvmf_fc_ls_cr_conn_rqst) -
+ (2 * sizeof(uint32_t)));
+
+ /* fill in connect command descriptor */
+ to_be32(&cc_rqst.connect_cmd.desc_tag, FCNVME_LSDESC_CREATE_CONN_CMD);
+ to_be32(&cc_rqst.connect_cmd.desc_len,
+ sizeof(struct spdk_nvmf_fc_lsdesc_cr_conn_cmd) -
+ (2 * sizeof(uint32_t)));
+
+ to_be16(&cc_rqst.connect_cmd.ersp_ratio, (g_nvmf_transport.opts.max_queue_depth / 2));
+ to_be16(&cc_rqst.connect_cmd.sqsize, g_nvmf_transport.opts.max_queue_depth - 1);
+ to_be16(&cc_rqst.connect_cmd.qid, qid);
+
+ /* fill in association id descriptor */
+ to_be32(&cc_rqst.assoc_id.desc_tag, FCNVME_LSDESC_ASSOC_ID),
+ to_be32(&cc_rqst.assoc_id.desc_len,
+ sizeof(struct spdk_nvmf_fc_lsdesc_assoc_id) -
+ (2 * sizeof(uint32_t)));
+	cc_rqst.assoc_id.association_id = assoc_id; /* already be64 */
+
+ ls_rqst.rqstbuf.virt = &cc_rqst;
+ ls_rqst.rspbuf.virt = respbuf;
+ ls_rqst.rqst_len = sizeof(struct spdk_nvmf_fc_ls_cr_conn_rqst);
+ ls_rqst.rsp_len = 0;
+ ls_rqst.rpi = 5000;
+ ls_rqst.private_data = NULL;
+ ls_rqst.s_id = 0;
+ ls_rqst.nport = tgt_port;
+ ls_rqst.rport = &g_rem_port;
+ ls_rqst.nvmf_tgt = &g_nvmf_tgt;
+
+ nvmf_fc_handle_ls_rqst(&ls_rqst);
+ poll_thread(0);
+}
+
+static void
+run_disconn_test(struct spdk_nvmf_fc_nport *tgt_port,
+ uint64_t assoc_id)
+{
+ struct spdk_nvmf_fc_ls_rqst ls_rqst;
+ struct spdk_nvmf_fc_ls_disconnect_rqst dc_rqst;
+ uint8_t respbuf[128];
+
+ memset(&dc_rqst, 0, sizeof(struct spdk_nvmf_fc_ls_disconnect_rqst));
+
+ /* fill in request descriptor */
+ dc_rqst.w0.ls_cmd = FCNVME_LS_DISCONNECT;
+ to_be32(&dc_rqst.desc_list_len,
+ sizeof(struct spdk_nvmf_fc_ls_disconnect_rqst) -
+ (2 * sizeof(uint32_t)));
+
+ /* fill in disconnect command descriptor */
+ to_be32(&dc_rqst.disconn_cmd.desc_tag, FCNVME_LSDESC_DISCONN_CMD);
+ to_be32(&dc_rqst.disconn_cmd.desc_len,
+ sizeof(struct spdk_nvmf_fc_lsdesc_disconn_cmd) -
+ (2 * sizeof(uint32_t)));
+
+ /* fill in association id descriptor */
+ to_be32(&dc_rqst.assoc_id.desc_tag, FCNVME_LSDESC_ASSOC_ID),
+ to_be32(&dc_rqst.assoc_id.desc_len,
+ sizeof(struct spdk_nvmf_fc_lsdesc_assoc_id) -
+ (2 * sizeof(uint32_t)));
+	dc_rqst.assoc_id.association_id = assoc_id; /* already be64 */
+
+ ls_rqst.rqstbuf.virt = &dc_rqst;
+ ls_rqst.rspbuf.virt = respbuf;
+ ls_rqst.rqst_len = sizeof(struct spdk_nvmf_fc_ls_disconnect_rqst);
+ ls_rqst.rsp_len = 0;
+ ls_rqst.rpi = 5000;
+ ls_rqst.private_data = NULL;
+ ls_rqst.s_id = 0;
+ ls_rqst.nport = tgt_port;
+ ls_rqst.rport = &g_rem_port;
+ ls_rqst.nvmf_tgt = &g_nvmf_tgt;
+
+ nvmf_fc_handle_ls_rqst(&ls_rqst);
+ poll_thread(0);
+}
+
+static void
+disconnect_assoc_cb(void *cb_data, uint32_t err)
+{
+ CU_ASSERT(err == 0);
+}
+
+static int
+handle_ca_rsp(struct spdk_nvmf_fc_ls_rqst *ls_rqst, bool max_assoc_test)
+{
+ struct spdk_nvmf_fc_ls_acc_hdr *acc_hdr =
+ (struct spdk_nvmf_fc_ls_acc_hdr *) ls_rqst->rspbuf.virt;
+
+ if (acc_hdr->rqst.w0.ls_cmd == FCNVME_LS_CREATE_ASSOCIATION) {
+ if (acc_hdr->w0.ls_cmd == FCNVME_LS_ACC) {
+ struct spdk_nvmf_fc_ls_cr_assoc_acc *acc =
+ (struct spdk_nvmf_fc_ls_cr_assoc_acc *)ls_rqst->rspbuf.virt;
+
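+			/* the "- 8" drops the two leading 32-bit words, matching the desc_list_len encoding of the requests */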
+ CU_ASSERT(from_be32(&acc_hdr->desc_list_len) ==
+ sizeof(struct spdk_nvmf_fc_ls_cr_assoc_acc) - 8);
+ CU_ASSERT(from_be32(&acc_hdr->rqst.desc_len) ==
+ sizeof(struct spdk_nvmf_fc_lsdesc_rqst) - 8);
+ CU_ASSERT(from_be32(&acc_hdr->rqst.desc_tag) ==
+ FCNVME_LSDESC_RQST);
+ CU_ASSERT(from_be32(&acc->assoc_id.desc_tag) ==
+ FCNVME_LSDESC_ASSOC_ID);
+ CU_ASSERT(from_be32(&acc->assoc_id.desc_len) ==
+ sizeof(struct spdk_nvmf_fc_lsdesc_assoc_id) - 8);
+ CU_ASSERT(from_be32(&acc->conn_id.desc_tag) ==
+ FCNVME_LSDESC_CONN_ID);
+ CU_ASSERT(from_be32(&acc->conn_id.desc_len) ==
+ sizeof(struct spdk_nvmf_fc_lsdesc_conn_id) - 8);
+
+ g_curr_assoc_id = acc->assoc_id.association_id;
+ g_create_conn_test_cnt++;
+ return 0;
+ } else if (max_assoc_test) {
+ /* reject reason code should be insufficient resources */
+ struct spdk_nvmf_fc_ls_rjt *rjt =
+ (struct spdk_nvmf_fc_ls_rjt *)ls_rqst->rspbuf.virt;
+ if (rjt->rjt.reason_code == FCNVME_RJT_RC_INSUFF_RES) {
+ return LAST_RSLT_STOP_TEST;
+ }
+ }
+ CU_FAIL("Unexpected reject response for create association");
+ } else {
+ CU_FAIL("Response not for create association");
+ }
+
+ return -EINVAL;
+}
+
+static int
+handle_cc_rsp(struct spdk_nvmf_fc_ls_rqst *ls_rqst)
+{
+ struct spdk_nvmf_fc_ls_acc_hdr *acc_hdr =
+ (struct spdk_nvmf_fc_ls_acc_hdr *) ls_rqst->rspbuf.virt;
+
+ if (acc_hdr->rqst.w0.ls_cmd == FCNVME_LS_CREATE_CONNECTION) {
+ if (acc_hdr->w0.ls_cmd == FCNVME_LS_ACC) {
+ struct spdk_nvmf_fc_ls_cr_conn_acc *acc =
+ (struct spdk_nvmf_fc_ls_cr_conn_acc *)ls_rqst->rspbuf.virt;
+
+ CU_ASSERT(from_be32(&acc_hdr->desc_list_len) ==
+ sizeof(struct spdk_nvmf_fc_ls_cr_conn_acc) - 8);
+ CU_ASSERT(from_be32(&acc_hdr->rqst.desc_len) ==
+ sizeof(struct spdk_nvmf_fc_lsdesc_rqst) - 8);
+ CU_ASSERT(from_be32(&acc_hdr->rqst.desc_tag) ==
+ FCNVME_LSDESC_RQST);
+ CU_ASSERT(from_be32(&acc->conn_id.desc_tag) ==
+ FCNVME_LSDESC_CONN_ID);
+ CU_ASSERT(from_be32(&acc->conn_id.desc_len) ==
+ sizeof(struct spdk_nvmf_fc_lsdesc_conn_id) - 8);
+ g_create_conn_test_cnt++;
+ return 0;
+ }
+
+ if (acc_hdr->w0.ls_cmd == FCNVME_LS_RJT) {
+ struct spdk_nvmf_fc_ls_rjt *rjt =
+ (struct spdk_nvmf_fc_ls_rjt *)ls_rqst->rspbuf.virt;
+ if (g_create_conn_test_cnt == g_nvmf_transport.opts.max_qpairs_per_ctrlr) {
+ /* expected to get reject for too many connections */
+ CU_ASSERT(rjt->rjt.reason_code ==
+ FCNVME_RJT_RC_INV_PARAM);
+ CU_ASSERT(rjt->rjt.reason_explanation ==
+ FCNVME_RJT_EXP_INV_Q_ID);
+ } else if (!g_max_assoc_conn_test) {
+				CU_FAIL("Unexpected reject response for create connection");
+ }
+ } else {
+ CU_FAIL("Unexpected response code for create connection");
+ }
+ } else {
+ CU_FAIL("Response not for create connection");
+ }
+
+ return -EINVAL;
+}
+
+static int
+handle_disconn_rsp(struct spdk_nvmf_fc_ls_rqst *ls_rqst)
+{
+ struct spdk_nvmf_fc_ls_acc_hdr *acc_hdr =
+ (struct spdk_nvmf_fc_ls_acc_hdr *) ls_rqst->rspbuf.virt;
+
+ if (acc_hdr->rqst.w0.ls_cmd == FCNVME_LS_DISCONNECT) {
+ if (acc_hdr->w0.ls_cmd == FCNVME_LS_ACC) {
+ CU_ASSERT(from_be32(&acc_hdr->desc_list_len) ==
+ sizeof(struct spdk_nvmf_fc_ls_disconnect_acc) - 8);
+ CU_ASSERT(from_be32(&acc_hdr->rqst.desc_len) ==
+ sizeof(struct spdk_nvmf_fc_lsdesc_rqst) - 8);
+ CU_ASSERT(from_be32(&acc_hdr->rqst.desc_tag) ==
+ FCNVME_LSDESC_RQST);
+ return 0;
+ } else {
+ CU_FAIL("Unexpected reject response for disconnect");
+ }
+ } else {
+		CU_FAIL("Response not for disconnect");
+ }
+
+ return -EINVAL;
+}
+
+static int
+handle_conn_bad_assoc_rsp(struct spdk_nvmf_fc_ls_rqst *ls_rqst)
+{
+ struct spdk_nvmf_fc_ls_acc_hdr *acc_hdr =
+ (struct spdk_nvmf_fc_ls_acc_hdr *) ls_rqst->rspbuf.virt;
+
+ if (acc_hdr->rqst.w0.ls_cmd == FCNVME_LS_CREATE_CONNECTION) {
+ if (acc_hdr->w0.ls_cmd == FCNVME_LS_RJT) {
+ struct spdk_nvmf_fc_ls_rjt *rjt =
+ (struct spdk_nvmf_fc_ls_rjt *)ls_rqst->rspbuf.virt;
+
+ CU_ASSERT(from_be32(&rjt->desc_list_len) ==
+ sizeof(struct spdk_nvmf_fc_ls_rjt) - 8);
+ CU_ASSERT(from_be32(&rjt->rqst.desc_tag) ==
+ FCNVME_LSDESC_RQST);
+ CU_ASSERT(from_be32(&rjt->rjt.desc_len) ==
+ sizeof(struct spdk_nvmf_fc_lsdesc_rjt) - 8);
+ CU_ASSERT(from_be32(&rjt->rjt.desc_tag) ==
+ FCNVME_LSDESC_RJT);
+ CU_ASSERT(rjt->rjt.reason_code ==
+ FCNVME_RJT_RC_INV_ASSOC);
+ CU_ASSERT(rjt->rjt.reason_explanation ==
+ FCNVME_RJT_EXP_NONE);
+ /* make sure reserved fields are 0 */
+ CU_ASSERT(rjt->rjt.rsvd8 == 0);
+ CU_ASSERT(rjt->rjt.rsvd12 == 0);
+ return 0;
+ } else {
+ CU_FAIL("Unexpected accept response for create conn. on bad assoc_id");
+ }
+ } else {
+ CU_FAIL("Response not for create connection on bad assoc_id");
+ }
+
+ return -EINVAL;
+}
+
+static int
+handle_disconn_bad_assoc_rsp(struct spdk_nvmf_fc_ls_rqst *ls_rqst)
+{
+ struct spdk_nvmf_fc_ls_acc_hdr *acc_hdr =
+ (struct spdk_nvmf_fc_ls_acc_hdr *) ls_rqst->rspbuf.virt;
+
+ if (acc_hdr->rqst.w0.ls_cmd == FCNVME_LS_DISCONNECT) {
+ if (acc_hdr->w0.ls_cmd == FCNVME_LS_RJT) {
+ struct spdk_nvmf_fc_ls_rjt *rjt =
+ (struct spdk_nvmf_fc_ls_rjt *)ls_rqst->rspbuf.virt;
+
+ CU_ASSERT(from_be32(&rjt->desc_list_len) ==
+ sizeof(struct spdk_nvmf_fc_ls_rjt) - 8);
+ CU_ASSERT(from_be32(&rjt->rqst.desc_tag) ==
+ FCNVME_LSDESC_RQST);
+ CU_ASSERT(from_be32(&rjt->rjt.desc_len) ==
+ sizeof(struct spdk_nvmf_fc_lsdesc_rjt) - 8);
+ CU_ASSERT(from_be32(&rjt->rjt.desc_tag) ==
+ FCNVME_LSDESC_RJT);
+ CU_ASSERT(rjt->rjt.reason_code ==
+ FCNVME_RJT_RC_INV_ASSOC);
+ CU_ASSERT(rjt->rjt.reason_explanation ==
+ FCNVME_RJT_EXP_NONE);
+ return 0;
+ } else {
+ CU_FAIL("Unexpected accept response for disconnect on bad assoc_id");
+ }
+ } else {
+		CU_FAIL("Response not for disconnect on bad assoc_id");
+ }
+
+ return -EINVAL;
+}
+
+static struct spdk_nvmf_fc_port g_fc_port = {
+ .num_io_queues = 16,
+};
+
+static struct spdk_nvmf_fc_nport g_tgt_port;
+
+static uint64_t assoc_id[1024];
+
+#define FC_LS_UT_MAX_IO_QUEUES 16
+struct spdk_nvmf_fc_hwqp g_fc_hwqp[FC_LS_UT_MAX_IO_QUEUES];
+struct spdk_nvmf_fc_poll_group g_fgroup[FC_LS_UT_MAX_IO_QUEUES];
+struct spdk_nvmf_poll_group g_poll_group[FC_LS_UT_MAX_IO_QUEUES];
+static bool threads_allocated = false;
+
+static void
+ls_assign_hwqp_threads(void)
+{
+ uint32_t i;
+
+ for (i = 0; i < g_fc_port.num_io_queues; i++) {
+ struct spdk_nvmf_fc_hwqp *hwqp = &g_fc_port.io_queues[i];
+ if (hwqp->thread == NULL) {
+ hwqp->thread = spdk_get_thread();
+ }
+ }
+}
+
+static void
+ls_prepare_threads(void)
+{
+ if (threads_allocated == false) {
+ allocate_threads(8);
+ set_thread(0);
+ }
+ threads_allocated = true;
+}
+
+static void
+setup_polling_threads(void)
+{
+ ls_prepare_threads();
+ set_thread(0);
+ ls_assign_hwqp_threads();
+}
+
+static int
+ls_tests_init(void)
+{
+ uint16_t i;
+
+ bzero(&g_nvmf_tgt, sizeof(g_nvmf_tgt));
+
+ g_nvmf_transport.opts = g_nvmf_transport_opts;
+
+ snprintf(g_nvmf_subsystem.subnqn, sizeof(g_nvmf_subsystem.subnqn), "%s", fc_ut_subsystem_nqn);
+ g_fc_port.hw_port_status = SPDK_FC_PORT_ONLINE;
+ g_fc_port.io_queues = g_fc_hwqp;
+ for (i = 0; i < g_fc_port.num_io_queues; i++) {
+ struct spdk_nvmf_fc_hwqp *hwqp = &g_fc_port.io_queues[i];
+ hwqp->lcore_id = i;
+ hwqp->hwqp_id = i;
+ hwqp->thread = NULL;
+ hwqp->fc_port = &g_fc_port;
+ hwqp->num_conns = 0;
+ hwqp->rq_size = g_hw_queue_depth;
+ TAILQ_INIT(&hwqp->connection_list);
+ TAILQ_INIT(&hwqp->in_use_reqs);
+
+ bzero(&g_poll_group[i], sizeof(struct spdk_nvmf_poll_group));
+ bzero(&g_fgroup[i], sizeof(struct spdk_nvmf_fc_poll_group));
+ TAILQ_INIT(&g_poll_group[i].tgroups);
+ TAILQ_INIT(&g_poll_group[i].qpairs);
+ g_fgroup[i].group.transport = &g_nvmf_transport;
+ g_fgroup[i].group.group = &g_poll_group[i];
+ hwqp->fgroup = &g_fgroup[i];
+ }
+
+ nvmf_fc_ls_init(&g_fc_port);
+ bzero(&g_tgt_port, sizeof(struct spdk_nvmf_fc_nport));
+ g_tgt_port.fc_port = &g_fc_port;
+ TAILQ_INIT(&g_tgt_port.rem_port_list);
+ TAILQ_INIT(&g_tgt_port.fc_associations);
+
+ bzero(&g_rem_port, sizeof(struct spdk_nvmf_fc_remote_port_info));
+ TAILQ_INSERT_TAIL(&g_tgt_port.rem_port_list, &g_rem_port, link);
+
+ return 0;
+}
+
+static int
+ls_tests_fini(void)
+{
+ nvmf_fc_ls_fini(&g_fc_port);
+ free_threads();
+ return 0;
+}
+
+static void
+create_single_assoc_test(void)
+{
+ setup_polling_threads();
+ /* main test driver */
+ g_test_run_type = TEST_RUN_TYPE_CREATE_ASSOC;
+ run_create_assoc_test(fc_ut_subsystem_nqn, fc_ut_host, &g_tgt_port);
+
+ if (g_last_rslt == 0) {
+ /* disconnect the association */
+ g_test_run_type = TEST_RUN_TYPE_DISCONNECT;
+ run_disconn_test(&g_tgt_port, g_curr_assoc_id);
+ g_create_conn_test_cnt = 0;
+ }
+}
+
+static void
+create_max_conns_test(void)
+{
+ uint16_t qid = 1;
+
+ setup_polling_threads();
+ /* main test driver */
+ g_test_run_type = TEST_RUN_TYPE_CREATE_ASSOC;
+ run_create_assoc_test(fc_ut_subsystem_nqn, fc_ut_host, &g_tgt_port);
+
+ if (g_last_rslt == 0) {
+ g_test_run_type = TEST_RUN_TYPE_CREATE_CONN;
+ /* create connections until we get too many connections error */
+ while (g_last_rslt == 0) {
+ if (g_create_conn_test_cnt > g_nvmf_transport.opts.max_qpairs_per_ctrlr) {
+ CU_FAIL("Did not get CIOC failure for too many connections");
+ break;
+ }
+ run_create_conn_test(fc_ut_host, &g_tgt_port, g_curr_assoc_id, qid++);
+ }
+
+ /* disconnect the association */
+ g_last_rslt = 0;
+ g_test_run_type = TEST_RUN_TYPE_DISCONNECT;
+ run_disconn_test(&g_tgt_port, g_curr_assoc_id);
+ g_create_conn_test_cnt = 0;
+ }
+}
+
+static void
+invalid_connection_test(void)
+{
+ setup_polling_threads();
+ /* run test to create connection to invalid association */
+ g_test_run_type = TEST_RUN_TYPE_CONN_BAD_ASSOC;
+ run_create_conn_test(fc_ut_host, &g_tgt_port, g_curr_assoc_id, 1);
+}
+
+static void
+create_max_aq_conns_test(void)
+{
+ /* run test to create max. associations with max. connections */
+ uint32_t i, j;
+ uint32_t create_assoc_test_cnt = 0;
+
+ setup_polling_threads();
+ g_max_assoc_conn_test = 1;
+ g_last_rslt = 0;
+ while (1) {
+ g_test_run_type = TEST_RUN_TYPE_CREATE_MAX_ASSOC;
+ run_create_assoc_test(fc_ut_subsystem_nqn, fc_ut_host, &g_tgt_port);
+ if (g_last_rslt == 0) {
+ assoc_id[create_assoc_test_cnt++] = g_curr_assoc_id;
+ g_test_run_type = TEST_RUN_TYPE_CREATE_CONN;
+ for (j = 1; j < g_nvmf_transport.opts.max_qpairs_per_ctrlr; j++) {
+ if (g_last_rslt == 0) {
+ run_create_conn_test(fc_ut_host, &g_tgt_port, g_curr_assoc_id, (uint16_t) j);
+ }
+ }
+ } else {
+ break;
+ }
+ }
+
+ if (g_last_rslt == LAST_RSLT_STOP_TEST) {
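+		/*
+		 * Expected maximum: connections that fit in one hwqp, times
+		 * (num_io_queues - 1) hwqps, divided by the I/O connections per
+		 * association. The "- 1" terms presumably leave headroom for the
+		 * admin connection each association also consumes.
+		 */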
+ uint32_t ma = (((g_hw_queue_depth / g_nvmf_transport.opts.max_queue_depth) *
+ (g_fc_port.num_io_queues - 1))) /
+ (g_nvmf_transport.opts.max_qpairs_per_ctrlr - 1);
+ if (create_assoc_test_cnt < ma) {
+ printf("(%d assocs - should be %d) ", create_assoc_test_cnt, ma);
+ CU_FAIL("Didn't create max. associations");
+ } else {
+ printf("(%d assocs.) ", create_assoc_test_cnt);
+ }
+ g_last_rslt = 0;
+ }
+
+ for (i = 0; i < create_assoc_test_cnt; i++) {
+ int ret;
+ g_spdk_nvmf_fc_xmt_srsr_req = false;
+ ret = nvmf_fc_delete_association(&g_tgt_port, from_be64(&assoc_id[i]), true, false,
+ disconnect_assoc_cb, 0);
+ CU_ASSERT(ret == 0);
+ poll_thread(0);
+
+#if (NVMF_FC_LS_SEND_LS_DISCONNECT == 1)
+ if (ret == 0) {
+ /* check that LS disconnect was sent */
+ CU_ASSERT(g_spdk_nvmf_fc_xmt_srsr_req);
+ }
+#endif
+ }
+ g_max_assoc_conn_test = 0;
+}
+
+static void
+xmt_ls_rsp_failure_test(void)
+{
+ setup_polling_threads();
+ g_test_run_type = TEST_RUN_TYPE_FAIL_LS_RSP;
+ run_create_assoc_test(fc_ut_subsystem_nqn, fc_ut_host, &g_tgt_port);
+ if (g_last_rslt == 0) {
+		/* the LS response transmit failed, so the association should have been cleaned up */
+ CU_ASSERT(g_tgt_port.assoc_count == 0);
+ }
+}
+
+static void
+disconnect_bad_assoc_test(void)
+{
+ setup_polling_threads();
+ g_test_run_type = TEST_RUN_TYPE_DISCONNECT_BAD_ASSOC;
+ run_disconn_test(&g_tgt_port, 0xffff);
+}
+
+/*
+ * SPDK functions that are called by LS processing
+ */
+
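+/*
+ * UT hook for LS responses: dispatch to the checker that matches the test
+ * currently running. Returning non-zero (the FAIL_LS_RSP case) simulates a
+ * response transmit failure.
+ */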
+int
+nvmf_fc_xmt_ls_rsp(struct spdk_nvmf_fc_nport *tgt_port,
+ struct spdk_nvmf_fc_ls_rqst *ls_rqst)
+{
+ switch (g_test_run_type) {
+ case TEST_RUN_TYPE_CREATE_ASSOC:
+ g_last_rslt = handle_ca_rsp(ls_rqst, false);
+ break;
+ case TEST_RUN_TYPE_CREATE_CONN:
+ g_last_rslt = handle_cc_rsp(ls_rqst);
+ break;
+ case TEST_RUN_TYPE_DISCONNECT:
+ g_last_rslt = handle_disconn_rsp(ls_rqst);
+ break;
+ case TEST_RUN_TYPE_CONN_BAD_ASSOC:
+ g_last_rslt = handle_conn_bad_assoc_rsp(ls_rqst);
+ break;
+ case TEST_RUN_TYPE_FAIL_LS_RSP:
+ g_last_rslt = handle_ca_rsp(ls_rqst, false);
+ return 1;
+ case TEST_RUN_TYPE_DISCONNECT_BAD_ASSOC:
+ g_last_rslt = handle_disconn_bad_assoc_rsp(ls_rqst);
+ break;
+ case TEST_RUN_TYPE_CREATE_MAX_ASSOC:
+ g_last_rslt = handle_ca_rsp(ls_rqst, true);
+ break;
+
+ default:
+ CU_FAIL("LS Response for Invalid Test Type");
+ g_last_rslt = 1;
+ }
+
+ return 0;
+}
+
+int
+nvmf_fc_xmt_srsr_req(struct spdk_nvmf_fc_hwqp *hwqp,
+ struct spdk_nvmf_fc_srsr_bufs *srsr_bufs,
+ spdk_nvmf_fc_caller_cb cb, void *cb_args)
+{
+ struct spdk_nvmf_fc_ls_disconnect_rqst *dc_rqst =
+ (struct spdk_nvmf_fc_ls_disconnect_rqst *)
+ srsr_bufs->rqst;
+
+ CU_ASSERT(dc_rqst->w0.ls_cmd == FCNVME_LS_DISCONNECT);
+ CU_ASSERT(from_be32(&dc_rqst->desc_list_len) ==
+ sizeof(struct spdk_nvmf_fc_ls_disconnect_rqst) -
+ (2 * sizeof(uint32_t)));
+ CU_ASSERT(from_be32(&dc_rqst->assoc_id.desc_tag) ==
+ FCNVME_LSDESC_ASSOC_ID);
+ CU_ASSERT(from_be32(&dc_rqst->assoc_id.desc_len) ==
+ sizeof(struct spdk_nvmf_fc_lsdesc_assoc_id) -
+ (2 * sizeof(uint32_t)));
+
+ g_spdk_nvmf_fc_xmt_srsr_req = true;
+
+ if (cb) {
+ cb(hwqp, 0, cb_args);
+ }
+
+ return 0;
+}
+
+DEFINE_STUB_V(nvmf_fc_request_abort, (struct spdk_nvmf_fc_request *fc_req,
+ bool send_abts, spdk_nvmf_fc_caller_cb cb, void *cb_args));
+DEFINE_STUB_V(spdk_bdev_io_abort, (struct spdk_bdev_io *bdev_io, void *ctx));
+DEFINE_STUB_V(nvmf_fc_request_abort_complete, (void *arg1));
+
+static void
+usage(const char *program_name)
+{
+ printf("%s [options]\n", program_name);
+ printf("options:\n");
+ spdk_log_usage(stdout, "-t");
+ printf(" -i value - Number of IO Queues (default: %u)\n",
+ g_fc_port.num_io_queues);
+ printf(" -d value - HW queue depth (default: %u)\n",
+ g_hw_queue_depth);
+ printf(" -q value - SQ size (default: %u)\n",
+ g_nvmf_transport_opts.max_queue_depth);
+ printf(" -c value - Connection count (default: %u)\n",
+ g_nvmf_transport_opts.max_qpairs_per_ctrlr);
+ printf(" -u test# - Unit test# to run\n");
+ printf(" 0 : Run all tests (default)\n");
+ printf(" 1 : CASS/DISC create single assoc test\n");
+ printf(" 2 : Max. conns. test\n");
+ printf(" 3 : CIOC to invalid assoc_id connection test\n");
+ printf(" 4 : Create/delete max assoc conns test\n");
+ printf(" 5 : LS response failure test\n");
+ printf(" 6 : Disconnect bad assoc_id test\n");
+}
+
+int main(int argc, char **argv)
+{
+ unsigned int num_failures = 0;
+ CU_pSuite suite = NULL;
+ int test = 0;
+ long int val;
+ int op;
+
+ while ((op = getopt(argc, argv, "a:q:c:t:u:d:i:")) != -1) {
+ switch (op) {
+ case 'q':
+ val = spdk_strtol(optarg, 10);
+ if (val < 16) {
+ fprintf(stderr, "SQ size must be at least 16\n");
+ return -EINVAL;
+ }
+ g_nvmf_transport_opts.max_queue_depth = (uint16_t)val;
+ break;
+ case 'c':
+ val = spdk_strtol(optarg, 10);
+ if (val < 2) {
+ fprintf(stderr, "Connection count must be at least 2\n");
+ return -EINVAL;
+ }
+ g_nvmf_transport_opts.max_qpairs_per_ctrlr = (uint16_t)val;
+ break;
+ case 't':
+ if (spdk_log_set_flag(optarg) < 0) {
+ fprintf(stderr, "Unknown trace flag '%s'\n", optarg);
+ usage(argv[0]);
+ return -EINVAL;
+ }
+ break;
+ case 'u':
+ test = (int)spdk_strtol(optarg, 10);
+ break;
+ case 'd':
+ val = spdk_strtol(optarg, 10);
+ if (val < 16) {
+ fprintf(stderr, "HW queue depth must be at least 16\n");
+ return -EINVAL;
+ }
+ g_hw_queue_depth = (uint32_t)val;
+ break;
+ case 'i':
+ val = spdk_strtol(optarg, 10);
+ if (val < 2) {
+ fprintf(stderr, "Number of io queues must be at least 2\n");
+ return -EINVAL;
+ }
+ if (val > FC_LS_UT_MAX_IO_QUEUES) {
+ fprintf(stderr, "Number of io queues can't be greater than %d\n",
+ FC_LS_UT_MAX_IO_QUEUES);
+ return -EINVAL;
+ }
+ g_fc_port.num_io_queues = (uint32_t)val;
+ break;
+ default:
+ usage(argv[0]);
+ return -EINVAL;
+ }
+ }
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("FC-NVMe LS", ls_tests_init, ls_tests_fini);
+
+ if (test == 0) {
+
+ CU_ADD_TEST(suite, create_single_assoc_test);
+
+ CU_ADD_TEST(suite, create_max_conns_test);
+ CU_ADD_TEST(suite, invalid_connection_test);
+ CU_ADD_TEST(suite, disconnect_bad_assoc_test);
+
+ CU_ADD_TEST(suite, create_max_aq_conns_test);
+ CU_ADD_TEST(suite, xmt_ls_rsp_failure_test);
+
+ } else {
+
+ switch (test) {
+ case 1:
+ CU_ADD_TEST(suite, create_single_assoc_test);
+ break;
+ case 2:
+ CU_ADD_TEST(suite, create_max_conns_test);
+ break;
+ case 3:
+ CU_ADD_TEST(suite, invalid_connection_test);
+ break;
+ case 4:
+ CU_ADD_TEST(suite, create_max_aq_conns_test);
+ break;
+ case 5:
+ CU_ADD_TEST(suite, xmt_ls_rsp_failure_test);
+ break;
+ case 6:
+ CU_ADD_TEST(suite, disconnect_bad_assoc_test);
+ break;
+
+ default:
+ fprintf(stderr, "Invalid test number\n");
+ usage(argv[0]);
+ CU_cleanup_registry();
+ return -EINVAL;
+ }
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvmf/rdma.c/.gitignore b/src/spdk/test/unit/lib/nvmf/rdma.c/.gitignore
new file mode 100644
index 000000000..0adb59d10
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/rdma.c/.gitignore
@@ -0,0 +1 @@
+rdma_ut
diff --git a/src/spdk/test/unit/lib/nvmf/rdma.c/Makefile b/src/spdk/test/unit/lib/nvmf/rdma.c/Makefile
new file mode 100644
index 000000000..ad4998663
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/rdma.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = rdma_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvmf/rdma.c/rdma_ut.c b/src/spdk/test/unit/lib/nvmf/rdma.c/rdma_ut.c
new file mode 100644
index 000000000..b0af58d18
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/rdma.c/rdma_ut.c
@@ -0,0 +1,1283 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation. All rights reserved.
+ * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+#include "spdk_cunit.h"
+#include "common/lib/test_env.c"
+#include "common/lib/test_rdma.c"
+#include "nvmf/rdma.c"
+#include "nvmf/transport.c"
+
+uint64_t g_mr_size;
+uint64_t g_mr_next_size;
+struct ibv_mr g_rdma_mr;
+
+#define RDMA_UT_UNITS_IN_MAX_IO 16
+
+struct spdk_nvmf_transport_opts g_rdma_ut_transport_opts = {
+ .max_queue_depth = SPDK_NVMF_RDMA_DEFAULT_MAX_QUEUE_DEPTH,
+ .max_qpairs_per_ctrlr = SPDK_NVMF_RDMA_DEFAULT_MAX_QPAIRS_PER_CTRLR,
+ .in_capsule_data_size = SPDK_NVMF_RDMA_DEFAULT_IN_CAPSULE_DATA_SIZE,
+ .max_io_size = (SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE * RDMA_UT_UNITS_IN_MAX_IO),
+ .io_unit_size = SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE,
+ .max_aq_depth = SPDK_NVMF_RDMA_DEFAULT_AQ_DEPTH,
+ .num_shared_buffers = SPDK_NVMF_RDMA_DEFAULT_NUM_SHARED_BUFFERS,
+};
+
+SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF)
+DEFINE_STUB(spdk_mem_map_set_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
+ uint64_t size, uint64_t translation), 0);
+DEFINE_STUB(spdk_mem_map_clear_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
+ uint64_t size), 0);
+DEFINE_STUB(spdk_mem_map_alloc, struct spdk_mem_map *, (uint64_t default_translation,
+ const struct spdk_mem_map_ops *ops, void *cb_ctx), NULL);
+DEFINE_STUB(spdk_nvmf_qpair_disconnect, int, (struct spdk_nvmf_qpair *qpair,
+ nvmf_qpair_disconnect_cb cb_fn, void *ctx), 0);
+DEFINE_STUB_V(spdk_mem_map_free, (struct spdk_mem_map **pmap));
+
+struct spdk_trace_histories *g_trace_histories;
+DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
+DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
+DEFINE_STUB_V(spdk_trace_register_description, (const char *name,
+ uint16_t tpoint_id, uint8_t owner_type, uint8_t object_type, uint8_t new_object,
+ uint8_t arg1_type, const char *arg1_name));
+DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
+ uint32_t size, uint64_t object_id, uint64_t arg1));
+
+DEFINE_STUB_V(spdk_nvmf_ctrlr_data_init, (struct spdk_nvmf_transport_opts *opts,
+ struct spdk_nvmf_ctrlr_data *cdata));
+DEFINE_STUB_V(spdk_nvmf_request_exec, (struct spdk_nvmf_request *req));
+DEFINE_STUB(spdk_nvmf_request_complete, int, (struct spdk_nvmf_request *req), 0);
+DEFINE_STUB(spdk_nvme_transport_id_compare, int, (const struct spdk_nvme_transport_id *trid1,
+ const struct spdk_nvme_transport_id *trid2), 0);
+DEFINE_STUB_V(nvmf_ctrlr_abort_aer, (struct spdk_nvmf_ctrlr *ctrlr));
+DEFINE_STUB(spdk_nvmf_request_get_dif_ctx, bool, (struct spdk_nvmf_request *req,
+ struct spdk_dif_ctx *dif_ctx), false);
+DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
+ enum spdk_nvme_transport_type trtype));
+DEFINE_STUB_V(spdk_nvmf_tgt_new_qpair, (struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair));
+DEFINE_STUB(nvmf_ctrlr_abort_request, int, (struct spdk_nvmf_request *req), 0);
+
+const char *
+spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
+{
+ switch (trtype) {
+ case SPDK_NVME_TRANSPORT_PCIE:
+ return "PCIe";
+ case SPDK_NVME_TRANSPORT_RDMA:
+ return "RDMA";
+ case SPDK_NVME_TRANSPORT_FC:
+ return "FC";
+ default:
+ return NULL;
+ }
+}
+
+int
+spdk_nvme_transport_id_populate_trstring(struct spdk_nvme_transport_id *trid, const char *trstring)
+{
+ int len, i;
+
+ if (trstring == NULL) {
+ return -EINVAL;
+ }
+
+ len = strnlen(trstring, SPDK_NVMF_TRSTRING_MAX_LEN);
+ if (len == SPDK_NVMF_TRSTRING_MAX_LEN) {
+ return -EINVAL;
+ }
+
+	/* Copy the input trstring, uppercased, into the trid. */
+ for (i = 0; i < len; i++) {
+ trid->trstring[i] = toupper(trstring[i]);
+ }
+ return 0;
+}
+
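+/*
+ * Stubbed translation: when g_mr_size is set, report a region smaller than
+ * requested so a single buffer appears to straddle two memory regions
+ * (exercised by Test 5 in test_spdk_nvmf_rdma_request_parse_sgl()).
+ */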
+uint64_t
+spdk_mem_map_translate(const struct spdk_mem_map *map, uint64_t vaddr, uint64_t *size)
+{
+ if (g_mr_size != 0) {
+		*size = g_mr_size;
+ if (g_mr_next_size != 0) {
+ g_mr_size = g_mr_next_size;
+ }
+ }
+
+ return (uint64_t)&g_rdma_mr;
+}
+
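+/* Return the request fields touched by parse_sgl to a known-clean state between sub-tests. */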
+static void
+reset_nvmf_rdma_request(struct spdk_nvmf_rdma_request *rdma_req)
+{
+ int i;
+
+ rdma_req->req.length = 0;
+ rdma_req->req.data_from_pool = false;
+ rdma_req->req.data = NULL;
+ rdma_req->data.wr.num_sge = 0;
+ rdma_req->data.wr.wr.rdma.remote_addr = 0;
+ rdma_req->data.wr.wr.rdma.rkey = 0;
+ memset(&rdma_req->req.dif, 0, sizeof(rdma_req->req.dif));
+
+ for (i = 0; i < SPDK_NVMF_MAX_SGL_ENTRIES; i++) {
+ rdma_req->req.iov[i].iov_base = 0;
+ rdma_req->req.iov[i].iov_len = 0;
+ rdma_req->req.buffers[i] = 0;
+ rdma_req->data.wr.sg_list[i].addr = 0;
+ rdma_req->data.wr.sg_list[i].length = 0;
+ rdma_req->data.wr.sg_list[i].lkey = 0;
+ }
+ rdma_req->req.iovcnt = 0;
+}
+
+static void
+test_spdk_nvmf_rdma_request_parse_sgl(void)
+{
+ struct spdk_nvmf_rdma_transport rtransport;
+ struct spdk_nvmf_rdma_device device;
+ struct spdk_nvmf_rdma_request rdma_req = {};
+ struct spdk_nvmf_rdma_recv recv;
+ struct spdk_nvmf_rdma_poll_group group;
+ struct spdk_nvmf_rdma_qpair rqpair;
+ struct spdk_nvmf_rdma_poller poller;
+ union nvmf_c2h_msg cpl;
+ union nvmf_h2c_msg cmd;
+ struct spdk_nvme_sgl_descriptor *sgl;
+ struct spdk_nvmf_transport_pg_cache_buf bufs[4];
+ struct spdk_nvme_sgl_descriptor sgl_desc[SPDK_NVMF_MAX_SGL_ENTRIES] = {{0}};
+ struct spdk_nvmf_rdma_request_data data;
+ struct spdk_nvmf_transport_pg_cache_buf buffer;
+ struct spdk_nvmf_transport_pg_cache_buf *buffer_ptr;
+ int rc, i;
+
+ data.wr.sg_list = data.sgl;
+ STAILQ_INIT(&group.group.buf_cache);
+ group.group.buf_cache_size = 0;
+ group.group.buf_cache_count = 0;
+ group.group.transport = &rtransport.transport;
+ STAILQ_INIT(&group.retired_bufs);
+ poller.group = &group;
+ rqpair.poller = &poller;
+ rqpair.max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;
+
+ sgl = &cmd.nvme_cmd.dptr.sgl1;
+ rdma_req.recv = &recv;
+ rdma_req.req.cmd = &cmd;
+ rdma_req.req.rsp = &cpl;
+ rdma_req.data.wr.sg_list = rdma_req.data.sgl;
+ rdma_req.req.qpair = &rqpair.qpair;
+ rdma_req.req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
+
+ rtransport.transport.opts = g_rdma_ut_transport_opts;
+ rtransport.data_wr_pool = NULL;
+ rtransport.transport.data_buf_pool = NULL;
+
+ device.attr.device_cap_flags = 0;
+ g_rdma_mr.lkey = 0xABCD;
+ sgl->keyed.key = 0xEEEE;
+ sgl->address = 0xFFFF;
+ rdma_req.recv->buf = (void *)0xDDDD;
+
+ /* Test 1: sgl type: keyed data block subtype: address */
+ sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
+ sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
+
+ /* Part 1: simple I/O, one SGL smaller than the transport io unit size */
+ MOCK_SET(spdk_mempool_get, (void *)0x2000);
+ reset_nvmf_rdma_request(&rdma_req);
+ sgl->keyed.length = rtransport.transport.opts.io_unit_size / 2;
+
+ device.map = (void *)0x0;
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size / 2);
+ CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.num_sge == 1);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
+ CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.sg_list[0].length == rtransport.transport.opts.io_unit_size / 2);
+ CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == g_rdma_mr.lkey);
+
+ /* Part 2: simple I/O, one SGL larger than the transport io unit size (equal to the max io size) */
+ reset_nvmf_rdma_request(&rdma_req);
+ sgl->keyed.length = rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO;
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO);
+ CU_ASSERT(rdma_req.data.wr.num_sge == RDMA_UT_UNITS_IN_MAX_IO);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
+ for (i = 0; i < RDMA_UT_UNITS_IN_MAX_IO; i++) {
+ CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
+ }
+
+ /* Part 3: simple I/O one SGL larger than the transport max io size */
+ reset_nvmf_rdma_request(&rdma_req);
+ sgl->keyed.length = rtransport.transport.opts.max_io_size * 2;
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == -1);
+
+ /* Part 4: Pretend there are no buffer pools */
+ MOCK_SET(spdk_mempool_get, NULL);
+ reset_nvmf_rdma_request(&rdma_req);
+ sgl->keyed.length = rtransport.transport.opts.io_unit_size * RDMA_UT_UNITS_IN_MAX_IO;
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == false);
+ CU_ASSERT(rdma_req.req.data == NULL);
+ CU_ASSERT(rdma_req.data.wr.num_sge == 0);
+ CU_ASSERT(rdma_req.req.buffers[0] == NULL);
+ CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0);
+ CU_ASSERT(rdma_req.data.wr.sg_list[0].length == 0);
+ CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == 0);
+
+ rdma_req.recv->buf = (void *)0xDDDD;
+ /* Test 2: sgl type: keyed data block subtype: offset (in capsule data) */
+ sgl->generic.type = SPDK_NVME_SGL_TYPE_DATA_BLOCK;
+ sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
+
+ /* Part 1: Normal I/O smaller than in capsule data size no offset */
+ reset_nvmf_rdma_request(&rdma_req);
+ sgl->address = 0;
+ sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size;
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(rdma_req.req.data == (void *)0xDDDD);
+ CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.in_capsule_data_size);
+ CU_ASSERT(rdma_req.req.data_from_pool == false);
+
+ /* Part 2: I/O offset + length too large */
+ reset_nvmf_rdma_request(&rdma_req);
+ sgl->address = rtransport.transport.opts.in_capsule_data_size;
+ sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size;
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == -1);
+
+ /* Part 3: I/O too large */
+ reset_nvmf_rdma_request(&rdma_req);
+ sgl->address = 0;
+ sgl->unkeyed.length = rtransport.transport.opts.in_capsule_data_size * 2;
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == -1);
+
+ /* Test 3: Multi SGL */
+ sgl->generic.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
+ sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
+ sgl->address = 0;
+ rdma_req.recv->buf = (void *)&sgl_desc;
+ MOCK_SET(spdk_mempool_get, &data);
+
+ /* part 1: 2 segments each with 1 wr. */
+ reset_nvmf_rdma_request(&rdma_req);
+ sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
+ for (i = 0; i < 2; i++) {
+ sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
+ sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
+ sgl_desc[i].keyed.length = rtransport.transport.opts.io_unit_size;
+ sgl_desc[i].address = 0x4000 + i * rtransport.transport.opts.io_unit_size;
+ sgl_desc[i].keyed.key = 0x44;
+ }
+
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 2);
+ CU_ASSERT(rdma_req.data.wr.num_sge == 1);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
+ CU_ASSERT(rdma_req.data.wr.next == &data.wr);
+ CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
+ CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size);
+ CU_ASSERT(data.wr.num_sge == 1);
+ CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);
+
+ /* part 2: 2 segments, each with 1 wr containing 8 sge_elements */
+ reset_nvmf_rdma_request(&rdma_req);
+ sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
+ for (i = 0; i < 2; i++) {
+ sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
+ sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
+ sgl_desc[i].keyed.length = rtransport.transport.opts.io_unit_size * 8;
+ sgl_desc[i].address = 0x4000 + i * 8 * rtransport.transport.opts.io_unit_size;
+ sgl_desc[i].keyed.key = 0x44;
+ }
+
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 16);
+ CU_ASSERT(rdma_req.req.iovcnt == 16);
+ CU_ASSERT(rdma_req.data.wr.num_sge == 8);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
+ CU_ASSERT(rdma_req.data.wr.next == &data.wr);
+ CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
+ CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size * 8);
+ CU_ASSERT(data.wr.num_sge == 8);
+ CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);
+
+ /* part 3: 2 segments, one very large, one very small */
+ reset_nvmf_rdma_request(&rdma_req);
+ for (i = 0; i < 2; i++) {
+ sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
+ sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
+ sgl_desc[i].keyed.key = 0x44;
+ }
+
+ sgl_desc[0].keyed.length = rtransport.transport.opts.io_unit_size * 15 +
+ rtransport.transport.opts.io_unit_size / 2;
+ sgl_desc[0].address = 0x4000;
+ sgl_desc[1].keyed.length = rtransport.transport.opts.io_unit_size / 2;
+ sgl_desc[1].address = 0x4000 + rtransport.transport.opts.io_unit_size * 15 +
+ rtransport.transport.opts.io_unit_size / 2;
+
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 16);
+ CU_ASSERT(rdma_req.req.iovcnt == 17);
+ CU_ASSERT(rdma_req.data.wr.num_sge == 16);
+ for (i = 0; i < 15; i++) {
+ CU_ASSERT(rdma_req.data.sgl[i].length == rtransport.transport.opts.io_unit_size);
+ }
+ CU_ASSERT(rdma_req.data.sgl[15].length == rtransport.transport.opts.io_unit_size / 2);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
+ CU_ASSERT(rdma_req.data.wr.next == &data.wr);
+ CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
+ CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + rtransport.transport.opts.io_unit_size * 15 +
+ rtransport.transport.opts.io_unit_size / 2);
+ CU_ASSERT(data.sgl[0].length == rtransport.transport.opts.io_unit_size / 2);
+ CU_ASSERT(data.wr.num_sge == 1);
+ CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);
+
+ /* Test 4: use PG buffer cache */
+ sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
+ sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
+ sgl->address = 0xFFFF;
+ rdma_req.recv->buf = (void *)0xDDDD;
+ g_rdma_mr.lkey = 0xABCD;
+ sgl->keyed.key = 0xEEEE;
+
+ for (i = 0; i < 4; i++) {
+ STAILQ_INSERT_TAIL(&group.group.buf_cache, &bufs[i], link);
+ }
+
+ /* part 1: use the four buffers from the pg cache */
+ group.group.buf_cache_size = 4;
+ group.group.buf_cache_count = 4;
+ MOCK_SET(spdk_mempool_get, (void *)0x2000);
+ reset_nvmf_rdma_request(&rdma_req);
+ sgl->keyed.length = rtransport.transport.opts.io_unit_size * 4;
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
+ CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&bufs[0] + NVMF_DATA_BUFFER_MASK) &
+ ~NVMF_DATA_BUFFER_MASK));
+ CU_ASSERT(rdma_req.data.wr.num_sge == 4);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
+ CU_ASSERT(group.group.buf_cache_count == 0);
+ CU_ASSERT(STAILQ_EMPTY(&group.group.buf_cache));
+ for (i = 0; i < 4; i++) {
+ CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == (uint64_t)&bufs[i]);
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (((uint64_t)&bufs[i] + NVMF_DATA_BUFFER_MASK) &
+ ~NVMF_DATA_BUFFER_MASK));
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
+ }
+
+ /* part 2: now that we have used the buffers from the cache, try again. We should get mempool buffers. */
+ reset_nvmf_rdma_request(&rdma_req);
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
+ CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.num_sge == 4);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
+ CU_ASSERT(group.group.buf_cache_count == 0);
+ CU_ASSERT(STAILQ_EMPTY(&group.group.buf_cache));
+ for (i = 0; i < 4; i++) {
+ CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
+ CU_ASSERT(group.group.buf_cache_count == 0);
+ }
+
+ /* part 3: half and half */
+ group.group.buf_cache_count = 2;
+
+ for (i = 0; i < 2; i++) {
+ STAILQ_INSERT_TAIL(&group.group.buf_cache, &bufs[i], link);
+ }
+ reset_nvmf_rdma_request(&rdma_req);
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size * 4);
+ CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&bufs[0] + NVMF_DATA_BUFFER_MASK) &
+ ~NVMF_DATA_BUFFER_MASK));
+ CU_ASSERT(rdma_req.data.wr.num_sge == 4);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
+ CU_ASSERT(group.group.buf_cache_count == 0);
+ for (i = 0; i < 2; i++) {
+ CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == (uint64_t)&bufs[i]);
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (((uint64_t)&bufs[i] + NVMF_DATA_BUFFER_MASK) &
+ ~NVMF_DATA_BUFFER_MASK));
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
+ }
+ for (i = 2; i < 4; i++) {
+ CU_ASSERT((uint64_t)rdma_req.req.buffers[i] == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].length == rtransport.transport.opts.io_unit_size);
+ }
+
+ reset_nvmf_rdma_request(&rdma_req);
+ /* Test 5 dealing with a buffer split over two Memory Regions */
+ MOCK_SET(spdk_mempool_get, (void *)&buffer);
+ sgl->generic.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
+ sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
+ sgl->keyed.length = rtransport.transport.opts.io_unit_size / 2;
+ g_mr_size = rtransport.transport.opts.io_unit_size / 4;
+ g_mr_next_size = rtransport.transport.opts.io_unit_size / 2;
+
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size / 2);
+ CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&buffer + NVMF_DATA_BUFFER_MASK) &
+ ~NVMF_DATA_BUFFER_MASK));
+ CU_ASSERT(rdma_req.data.wr.num_sge == 1);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
+ CU_ASSERT(rdma_req.req.buffers[0] == &buffer);
+ CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == (((uint64_t)&buffer + NVMF_DATA_BUFFER_MASK) &
+ ~NVMF_DATA_BUFFER_MASK));
+ CU_ASSERT(rdma_req.data.wr.sg_list[0].length == rtransport.transport.opts.io_unit_size / 2);
+ CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == g_rdma_mr.lkey);
+ buffer_ptr = STAILQ_FIRST(&group.retired_bufs);
+ CU_ASSERT(buffer_ptr == &buffer);
+ STAILQ_REMOVE(&group.retired_bufs, buffer_ptr, spdk_nvmf_transport_pg_cache_buf, link);
+ CU_ASSERT(STAILQ_EMPTY(&group.retired_bufs));
+ g_mr_size = 0;
+ g_mr_next_size = 0;
+
+ reset_nvmf_rdma_request(&rdma_req);
+}
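+
+/*
+ * The address assertions in the test above all reduce to one rounding rule:
+ * a raw buffer pointer is rounded up to the next NVMF_DATA_BUFFER_ALIGNMENT
+ * boundary, written with the mask as
+ * (addr + NVMF_DATA_BUFFER_MASK) & ~NVMF_DATA_BUFFER_MASK.
+ * A minimal sketch of that arithmetic, assuming mask == alignment - 1 with a
+ * power-of-two alignment (the helper name is illustrative, not part of the
+ * code under test):
+ */
+static inline uint64_t
+ut_align_up(uint64_t addr, uint64_t mask)
+{
+	/* Adding the mask carries into the next aligned multiple; clearing
+	 * the low bits then snaps the address onto that boundary. */
+	return (addr + mask) & ~mask;
+}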
+
+static struct spdk_nvmf_rdma_recv *
+create_recv(struct spdk_nvmf_rdma_qpair *rqpair, enum spdk_nvme_nvm_opcode opc)
+{
+ struct spdk_nvmf_rdma_recv *rdma_recv;
+ union nvmf_h2c_msg *cmd;
+ struct spdk_nvme_sgl_descriptor *sgl;
+
+ rdma_recv = calloc(1, sizeof(*rdma_recv));
+ rdma_recv->qpair = rqpair;
+ cmd = calloc(1, sizeof(*cmd));
+ rdma_recv->sgl[0].addr = (uintptr_t)cmd;
+ cmd->nvme_cmd.opc = opc;
+ sgl = &cmd->nvme_cmd.dptr.sgl1;
+ sgl->keyed.key = 0xEEEE;
+ sgl->address = 0xFFFF;
+ sgl->keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
+ sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
+ sgl->keyed.length = 1;
+
+ return rdma_recv;
+}
+
+static void
+free_recv(struct spdk_nvmf_rdma_recv *rdma_recv)
+{
+ free((void *)rdma_recv->sgl[0].addr);
+ free(rdma_recv);
+}
+
+static struct spdk_nvmf_rdma_request *
+create_req(struct spdk_nvmf_rdma_qpair *rqpair,
+ struct spdk_nvmf_rdma_recv *rdma_recv)
+{
+ struct spdk_nvmf_rdma_request *rdma_req;
+ union nvmf_c2h_msg *cpl;
+
+ rdma_req = calloc(1, sizeof(*rdma_req));
+ rdma_req->recv = rdma_recv;
+ rdma_req->req.qpair = &rqpair->qpair;
+ rdma_req->state = RDMA_REQUEST_STATE_NEW;
+ rdma_req->data.wr.wr_id = (uintptr_t)&rdma_req->data.rdma_wr;
+ rdma_req->data.wr.sg_list = rdma_req->data.sgl;
+ cpl = calloc(1, sizeof(*cpl));
+ rdma_req->rsp.sgl[0].addr = (uintptr_t)cpl;
+ rdma_req->req.rsp = cpl;
+
+ return rdma_req;
+}
+
+static void
+free_req(struct spdk_nvmf_rdma_request *rdma_req)
+{
+ free((void *)rdma_req->rsp.sgl[0].addr);
+ free(rdma_req);
+}
+
+static void
+qpair_reset(struct spdk_nvmf_rdma_qpair *rqpair,
+ struct spdk_nvmf_rdma_poller *poller,
+ struct spdk_nvmf_rdma_device *device,
+ struct spdk_nvmf_rdma_resources *resources)
+{
+ memset(rqpair, 0, sizeof(*rqpair));
+ STAILQ_INIT(&rqpair->pending_rdma_write_queue);
+ STAILQ_INIT(&rqpair->pending_rdma_read_queue);
+ rqpair->poller = poller;
+ rqpair->device = device;
+ rqpair->resources = resources;
+ rqpair->qpair.qid = 1;
+ rqpair->ibv_state = IBV_QPS_RTS;
+ rqpair->qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
+ rqpair->max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;
+ rqpair->max_send_depth = 16;
+ rqpair->max_read_depth = 16;
+ resources->recvs_to_post.first = resources->recvs_to_post.last = NULL;
+}
+
+static void
+poller_reset(struct spdk_nvmf_rdma_poller *poller,
+ struct spdk_nvmf_rdma_poll_group *group)
+{
+ memset(poller, 0, sizeof(*poller));
+ STAILQ_INIT(&poller->qpairs_pending_recv);
+ STAILQ_INIT(&poller->qpairs_pending_send);
+ poller->group = group;
+}
+
+static void
+test_spdk_nvmf_rdma_request_process(void)
+{
+ struct spdk_nvmf_rdma_transport rtransport = {};
+ struct spdk_nvmf_rdma_poll_group group = {};
+ struct spdk_nvmf_rdma_poller poller = {};
+ struct spdk_nvmf_rdma_device device = {};
+ struct spdk_nvmf_rdma_resources resources = {};
+ struct spdk_nvmf_rdma_qpair rqpair = {};
+ struct spdk_nvmf_rdma_recv *rdma_recv;
+ struct spdk_nvmf_rdma_request *rdma_req;
+ bool progress;
+
+ STAILQ_INIT(&group.group.buf_cache);
+ STAILQ_INIT(&group.group.pending_buf_queue);
+ group.group.buf_cache_size = 0;
+ group.group.buf_cache_count = 0;
+ poller_reset(&poller, &group);
+ qpair_reset(&rqpair, &poller, &device, &resources);
+
+ rtransport.transport.opts = g_rdma_ut_transport_opts;
+ rtransport.transport.data_buf_pool = spdk_mempool_create("test_data_pool", 16, 128, 0, 0);
+ rtransport.data_wr_pool = spdk_mempool_create("test_wr_pool", 128,
+ sizeof(struct spdk_nvmf_rdma_request_data),
+ 0, 0);
+ MOCK_CLEAR(spdk_mempool_get);
+
+ device.attr.device_cap_flags = 0;
+ device.map = (void *)0x0;
+ g_rdma_mr.lkey = 0xABCD;
+
+ /* Test 1: single SGL READ request */
+ rdma_recv = create_recv(&rqpair, SPDK_NVME_OPC_READ);
+ rdma_req = create_req(&rqpair, rdma_recv);
+ rqpair.current_recv_depth = 1;
+ /* NEW -> EXECUTING */
+ progress = nvmf_rdma_request_process(&rtransport, rdma_req);
+ CU_ASSERT(progress == true);
+ CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_EXECUTING);
+ CU_ASSERT(rdma_req->req.xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST);
+ /* EXECUTED -> TRANSFERRING_C2H */
+ rdma_req->state = RDMA_REQUEST_STATE_EXECUTED;
+ progress = nvmf_rdma_request_process(&rtransport, rdma_req);
+ CU_ASSERT(progress == true);
+ CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_CONTROLLER_TO_HOST);
+ CU_ASSERT(rdma_req->recv == NULL);
+ CU_ASSERT(resources.recvs_to_post.first == &rdma_recv->wr);
+ CU_ASSERT(resources.recvs_to_post.last == &rdma_recv->wr);
+ /* COMPLETED -> FREE */
+ rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
+ progress = nvmf_rdma_request_process(&rtransport, rdma_req);
+ CU_ASSERT(progress == true);
+ CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_FREE);
+
+ free_recv(rdma_recv);
+ free_req(rdma_req);
+ poller_reset(&poller, &group);
+ qpair_reset(&rqpair, &poller, &device, &resources);
+
+ /* Test 2: single SGL WRITE request */
+ rdma_recv = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
+ rdma_req = create_req(&rqpair, rdma_recv);
+ rqpair.current_recv_depth = 1;
+ /* NEW -> TRANSFERRING_H2C */
+ progress = nvmf_rdma_request_process(&rtransport, rdma_req);
+ CU_ASSERT(progress == true);
+ CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
+ CU_ASSERT(rdma_req->req.xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER);
+ STAILQ_INIT(&poller.qpairs_pending_send);
+ /* READY_TO_EXECUTE -> EXECUTING */
+ rdma_req->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
+ progress = nvmf_rdma_request_process(&rtransport, rdma_req);
+ CU_ASSERT(progress == true);
+ CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_EXECUTING);
+ /* EXECUTED -> COMPLETING */
+ rdma_req->state = RDMA_REQUEST_STATE_EXECUTED;
+ progress = nvmf_rdma_request_process(&rtransport, rdma_req);
+ CU_ASSERT(progress == true);
+ CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_COMPLETING);
+ CU_ASSERT(rdma_req->recv == NULL);
+ CU_ASSERT(resources.recvs_to_post.first == &rdma_recv->wr);
+ CU_ASSERT(resources.recvs_to_post.last == &rdma_recv->wr);
+ /* COMPLETED -> FREE */
+ rdma_req->state = RDMA_REQUEST_STATE_COMPLETED;
+ progress = nvmf_rdma_request_process(&rtransport, rdma_req);
+ CU_ASSERT(progress == true);
+ CU_ASSERT(rdma_req->state == RDMA_REQUEST_STATE_FREE);
+
+ free_recv(rdma_recv);
+ free_req(rdma_req);
+ poller_reset(&poller, &group);
+ qpair_reset(&rqpair, &poller, &device, &resources);
+
+ /* Test 3: WRITE+WRITE ibv_send batching */
+ {
+ struct spdk_nvmf_rdma_recv *recv1, *recv2;
+ struct spdk_nvmf_rdma_request *req1, *req2;
+ recv1 = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
+ req1 = create_req(&rqpair, recv1);
+ recv2 = create_recv(&rqpair, SPDK_NVME_OPC_WRITE);
+ req2 = create_req(&rqpair, recv2);
+
+ /* WRITE 1: NEW -> TRANSFERRING_H2C */
+ rqpair.current_recv_depth = 1;
+ nvmf_rdma_request_process(&rtransport, req1);
+ CU_ASSERT(req1->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
+
+ /* WRITE 2: NEW -> TRANSFERRING_H2C */
+ rqpair.current_recv_depth = 2;
+ nvmf_rdma_request_process(&rtransport, req2);
+ CU_ASSERT(req2->state == RDMA_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER);
+
+ STAILQ_INIT(&poller.qpairs_pending_send);
+
+ /* WRITE 1 completes before WRITE 2 has finished RDMA reading */
+ /* WRITE 1: READY_TO_EXECUTE -> EXECUTING */
+ req1->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
+ nvmf_rdma_request_process(&rtransport, req1);
+ CU_ASSERT(req1->state == RDMA_REQUEST_STATE_EXECUTING);
+ /* WRITE 1: EXECUTED -> COMPLETING */
+ req1->state = RDMA_REQUEST_STATE_EXECUTED;
+ nvmf_rdma_request_process(&rtransport, req1);
+ CU_ASSERT(req1->state == RDMA_REQUEST_STATE_COMPLETING);
+ STAILQ_INIT(&poller.qpairs_pending_send);
+ /* WRITE 1: COMPLETED -> FREE */
+ req1->state = RDMA_REQUEST_STATE_COMPLETED;
+ nvmf_rdma_request_process(&rtransport, req1);
+ CU_ASSERT(req1->state == RDMA_REQUEST_STATE_FREE);
+
+ /* Now WRITE 2 has finished reading and completes */
+	/* WRITE 2: READY_TO_EXECUTE -> EXECUTING */
+ req2->state = RDMA_REQUEST_STATE_READY_TO_EXECUTE;
+ nvmf_rdma_request_process(&rtransport, req2);
+ CU_ASSERT(req2->state == RDMA_REQUEST_STATE_EXECUTING);
+	/* WRITE 2: EXECUTED -> COMPLETING */
+ req2->state = RDMA_REQUEST_STATE_EXECUTED;
+ nvmf_rdma_request_process(&rtransport, req2);
+ CU_ASSERT(req2->state == RDMA_REQUEST_STATE_COMPLETING);
+ STAILQ_INIT(&poller.qpairs_pending_send);
+	/* WRITE 2: COMPLETED -> FREE */
+ req2->state = RDMA_REQUEST_STATE_COMPLETED;
+ nvmf_rdma_request_process(&rtransport, req2);
+ CU_ASSERT(req2->state == RDMA_REQUEST_STATE_FREE);
+
+ free_recv(recv1);
+ free_req(req1);
+ free_recv(recv2);
+ free_req(req2);
+ poller_reset(&poller, &group);
+ qpair_reset(&rqpair, &poller, &device, &resources);
+ }
+
+ spdk_mempool_free(rtransport.transport.data_buf_pool);
+ spdk_mempool_free(rtransport.data_wr_pool);
+}
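+
+/*
+ * nvmf_rdma_request_process() returns true as long as it made forward
+ * progress on the request's state machine, which is why the test above can
+ * set a state and call it once per transition. A hypothetical caller that
+ * wanted to drain every immediately-available transition could simply loop
+ * until it returns false; a minimal sketch (the helper is illustrative
+ * only, not part of rdma.c):
+ */
+static void
+ut_drive_request(struct spdk_nvmf_rdma_transport *rtransport,
+		 struct spdk_nvmf_rdma_request *rdma_req)
+{
+	/* Stops once the request parks in a state that waits on an external
+	 * event (an RDMA completion, a free buffer, bdev I/O, ...). */
+	while (nvmf_rdma_request_process(rtransport, rdma_req)) {
+	}
+}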
+
+#define TEST_GROUPS_COUNT 5
+static void
+test_nvmf_rdma_get_optimal_poll_group(void)
+{
+ struct spdk_nvmf_rdma_transport rtransport = {};
+ struct spdk_nvmf_transport *transport = &rtransport.transport;
+ struct spdk_nvmf_rdma_qpair rqpair = {};
+ struct spdk_nvmf_transport_poll_group *groups[TEST_GROUPS_COUNT];
+ struct spdk_nvmf_rdma_poll_group *rgroups[TEST_GROUPS_COUNT];
+ struct spdk_nvmf_transport_poll_group *result;
+ uint32_t i;
+
+ rqpair.qpair.transport = transport;
+ pthread_mutex_init(&rtransport.lock, NULL);
+ TAILQ_INIT(&rtransport.poll_groups);
+
+ for (i = 0; i < TEST_GROUPS_COUNT; i++) {
+ groups[i] = nvmf_rdma_poll_group_create(transport);
+ CU_ASSERT(groups[i] != NULL);
+ rgroups[i] = SPDK_CONTAINEROF(groups[i], struct spdk_nvmf_rdma_poll_group, group);
+ groups[i]->transport = transport;
+ }
+ CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[0]);
+ CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[0]);
+
+	/* Emulate TEST_GROUPS_COUNT initiators connecting - each creates 1 admin and 1 io qpair */
+ for (i = 0; i < TEST_GROUPS_COUNT; i++) {
+ rqpair.qpair.qid = 0;
+ result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
+ CU_ASSERT(result == groups[i]);
+ CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
+ CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[i]);
+
+ rqpair.qpair.qid = 1;
+ result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
+ CU_ASSERT(result == groups[i]);
+ CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
+ CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[(i + 1) % TEST_GROUPS_COUNT]);
+ }
+	/* Wrapped around; the admin/io pg cursors are back at the first pg.
+	 * Destroy all poll groups except the last one. */
+ for (i = 0; i < TEST_GROUPS_COUNT - 1; i++) {
+ nvmf_rdma_poll_group_destroy(groups[i]);
+ CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[i + 1]);
+ CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[i + 1]);
+ }
+
+ CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
+ CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);
+
+ /* Check that pointers to the next admin/io poll groups are not changed */
+ rqpair.qpair.qid = 0;
+ result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
+ CU_ASSERT(result == groups[TEST_GROUPS_COUNT - 1]);
+ CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
+ CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);
+
+ rqpair.qpair.qid = 1;
+ result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
+ CU_ASSERT(result == groups[TEST_GROUPS_COUNT - 1]);
+ CU_ASSERT(rtransport.conn_sched.next_admin_pg == rgroups[TEST_GROUPS_COUNT - 1]);
+ CU_ASSERT(rtransport.conn_sched.next_io_pg == rgroups[TEST_GROUPS_COUNT - 1]);
+
+ /* Remove the last poll group, check that pointers are NULL */
+ nvmf_rdma_poll_group_destroy(groups[TEST_GROUPS_COUNT - 1]);
+ CU_ASSERT(rtransport.conn_sched.next_admin_pg == NULL);
+ CU_ASSERT(rtransport.conn_sched.next_io_pg == NULL);
+
+ /* Request optimal poll group, result must be NULL */
+ rqpair.qpair.qid = 0;
+ result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
+ CU_ASSERT(result == NULL);
+
+ rqpair.qpair.qid = 1;
+ result = nvmf_rdma_get_optimal_poll_group(&rqpair.qpair);
+ CU_ASSERT(result == NULL);
+
+ pthread_mutex_destroy(&rtransport.lock);
+}
+#undef TEST_GROUPS_COUNT
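+
+/*
+ * The expectations above describe a plain round robin with two independent
+ * cursors: next_admin_pg advances on every admin (qid 0) connection and
+ * next_io_pg on every I/O connection, each wrapping to the head of
+ * rtransport->poll_groups. A hedged paraphrase of that selection step
+ * (illustrative only; the real logic lives in
+ * nvmf_rdma_get_optimal_poll_group, and the TAILQ field name `link` is an
+ * assumption):
+ */
+static struct spdk_nvmf_rdma_poll_group *
+ut_round_robin_pg(struct spdk_nvmf_rdma_transport *rtransport, bool admin)
+{
+	struct spdk_nvmf_rdma_poll_group **cursor;
+	struct spdk_nvmf_rdma_poll_group *pg;
+
+	cursor = admin ? &rtransport->conn_sched.next_admin_pg :
+		 &rtransport->conn_sched.next_io_pg;
+	pg = *cursor;
+	if (pg == NULL) {
+		/* all poll groups have been destroyed */
+		return NULL;
+	}
+	/* hand out the current group, then advance and wrap the cursor */
+	*cursor = TAILQ_NEXT(pg, link);
+	if (*cursor == NULL) {
+		*cursor = TAILQ_FIRST(&rtransport->poll_groups);
+	}
+	return pg;
+}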
+
+static void
+test_spdk_nvmf_rdma_request_parse_sgl_with_md(void)
+{
+ struct spdk_nvmf_rdma_transport rtransport;
+ struct spdk_nvmf_rdma_device device;
+ struct spdk_nvmf_rdma_request rdma_req = {};
+ struct spdk_nvmf_rdma_recv recv;
+ struct spdk_nvmf_rdma_poll_group group;
+ struct spdk_nvmf_rdma_qpair rqpair;
+ struct spdk_nvmf_rdma_poller poller;
+ union nvmf_c2h_msg cpl;
+ union nvmf_h2c_msg cmd;
+ struct spdk_nvme_sgl_descriptor *sgl;
+ struct spdk_nvme_sgl_descriptor sgl_desc[SPDK_NVMF_MAX_SGL_ENTRIES] = {{0}};
+ struct spdk_nvmf_rdma_request_data data;
+ struct spdk_nvmf_transport_pg_cache_buf buffer;
+ struct spdk_nvmf_transport_pg_cache_buf *buffer_ptr;
+ const uint32_t data_bs = 512;
+ const uint32_t md_size = 8;
+ int rc, i;
+ void *aligned_buffer;
+
+ data.wr.sg_list = data.sgl;
+ STAILQ_INIT(&group.group.buf_cache);
+ group.group.buf_cache_size = 0;
+ group.group.buf_cache_count = 0;
+ group.group.transport = &rtransport.transport;
+ STAILQ_INIT(&group.retired_bufs);
+ poller.group = &group;
+ rqpair.poller = &poller;
+ rqpair.max_send_sge = SPDK_NVMF_MAX_SGL_ENTRIES;
+
+ sgl = &cmd.nvme_cmd.dptr.sgl1;
+ rdma_req.recv = &recv;
+ rdma_req.req.cmd = &cmd;
+ rdma_req.req.rsp = &cpl;
+ rdma_req.data.wr.sg_list = rdma_req.data.sgl;
+ rdma_req.req.qpair = &rqpair.qpair;
+ rdma_req.req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
+
+ rtransport.transport.opts = g_rdma_ut_transport_opts;
+ rtransport.data_wr_pool = NULL;
+ rtransport.transport.data_buf_pool = NULL;
+
+ device.attr.device_cap_flags = 0;
+ device.map = NULL;
+ g_rdma_mr.lkey = 0xABCD;
+ sgl->keyed.key = 0xEEEE;
+ sgl->address = 0xFFFF;
+ rdma_req.recv->buf = (void *)0xDDDD;
+
+ /* Test 1: sgl type: keyed data block subtype: address */
+	/* Test 1: SGL type: keyed data block, subtype: address */
+ sgl->keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
+
+ /* Part 1: simple I/O, one SGL smaller than the transport io unit size, block size 512 */
+ MOCK_SET(spdk_mempool_get, (void *)0x2000);
+ reset_nvmf_rdma_request(&rdma_req);
+ spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
+ SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
+ 0, 0, 0, 0, 0);
+ rdma_req.req.dif.dif_insert_or_strip = true;
+ rtransport.transport.opts.io_unit_size = data_bs * 8;
+ sgl->keyed.length = data_bs * 4;
+
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == data_bs * 4);
+ CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
+ CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
+ CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.num_sge == 4);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
+ CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
+
+ for (i = 0; i < 4; ++i) {
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
+ }
+
+ /* Part 2: simple I/O, one SGL equal to io unit size, io_unit_size is not aligned with md_size,
+ block size 512 */
+ MOCK_SET(spdk_mempool_get, (void *)0x2000);
+ reset_nvmf_rdma_request(&rdma_req);
+ spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
+ SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
+ 0, 0, 0, 0, 0);
+ rdma_req.req.dif.dif_insert_or_strip = true;
+ rtransport.transport.opts.io_unit_size = data_bs * 4;
+ sgl->keyed.length = data_bs * 4;
+
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == data_bs * 4);
+ CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
+ CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
+ CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.num_sge == 5);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
+ CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
+
+ for (i = 0; i < 3; ++i) {
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
+ }
+ CU_ASSERT(rdma_req.data.wr.sg_list[3].addr == 0x2000 + 3 * (data_bs + md_size));
+ CU_ASSERT(rdma_req.data.wr.sg_list[3].length == 488);
+ CU_ASSERT(rdma_req.data.wr.sg_list[3].lkey == g_rdma_mr.lkey);
+
+ /* 2nd buffer consumed */
+ CU_ASSERT(rdma_req.data.wr.sg_list[4].addr == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.sg_list[4].length == 24);
+ CU_ASSERT(rdma_req.data.wr.sg_list[4].lkey == g_rdma_mr.lkey);
+
+	/* Part 3: simple I/O, one SGL equal to the io unit size; io_unit_size equals the 512-byte block size */
+ MOCK_SET(spdk_mempool_get, (void *)0x2000);
+ reset_nvmf_rdma_request(&rdma_req);
+ spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
+ SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
+ 0, 0, 0, 0, 0);
+ rdma_req.req.dif.dif_insert_or_strip = true;
+ rtransport.transport.opts.io_unit_size = data_bs;
+ sgl->keyed.length = data_bs;
+
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == data_bs);
+ CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
+ CU_ASSERT(rdma_req.req.dif.elba_length == data_bs + md_size);
+ CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.num_sge == 1);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
+ CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
+
+ CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.sg_list[0].length == data_bs);
+ CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == g_rdma_mr.lkey);
+
+ CU_ASSERT(rdma_req.req.iovcnt == 2);
+ CU_ASSERT(rdma_req.req.iov[0].iov_base == (void *)((unsigned long)0x2000));
+ CU_ASSERT(rdma_req.req.iov[0].iov_len == data_bs);
+ /* 2nd buffer consumed for metadata */
+ CU_ASSERT(rdma_req.req.iov[1].iov_base == (void *)((unsigned long)0x2000));
+ CU_ASSERT(rdma_req.req.iov[1].iov_len == md_size);
+
+	/* Part 4: simple I/O, one SGL equal to the io unit size; io_unit_size is aligned with md_size,
+	   block size 512 */
+ MOCK_SET(spdk_mempool_get, (void *)0x2000);
+ reset_nvmf_rdma_request(&rdma_req);
+ spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
+ SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
+ 0, 0, 0, 0, 0);
+ rdma_req.req.dif.dif_insert_or_strip = true;
+ rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 4;
+ sgl->keyed.length = data_bs * 4;
+
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == data_bs * 4);
+ CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
+ CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
+ CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.num_sge == 4);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
+ CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
+
+ for (i = 0; i < 4; ++i) {
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
+ }
+
+ /* Part 5: simple I/O, one SGL equal to 2x io unit size, io_unit_size is aligned with md_size,
+ block size 512 */
+ MOCK_SET(spdk_mempool_get, (void *)0x2000);
+ reset_nvmf_rdma_request(&rdma_req);
+ spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
+ SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
+ 0, 0, 0, 0, 0);
+ rdma_req.req.dif.dif_insert_or_strip = true;
+ rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 2;
+ sgl->keyed.length = data_bs * 4;
+
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == data_bs * 4);
+ CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
+ CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4);
+ CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.num_sge == 4);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
+ CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
+
+ for (i = 0; i < 2; ++i) {
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
+ }
+ for (i = 0; i < 2; ++i) {
+ CU_ASSERT(rdma_req.data.wr.sg_list[i + 2].addr == 0x2000 + i * (data_bs + md_size));
+ CU_ASSERT(rdma_req.data.wr.sg_list[i + 2].length == data_bs);
+ }
+
+ /* Part 6: simple I/O, one SGL larger than the transport io unit size, io_unit_size is not aligned to md_size,
+ block size 512 */
+ MOCK_SET(spdk_mempool_get, (void *)0x2000);
+ reset_nvmf_rdma_request(&rdma_req);
+ spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
+ SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
+ 0, 0, 0, 0, 0);
+ rdma_req.req.dif.dif_insert_or_strip = true;
+ rtransport.transport.opts.io_unit_size = data_bs * 4;
+ sgl->keyed.length = data_bs * 6;
+
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == data_bs * 6);
+ CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
+ CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 6);
+ CU_ASSERT((uint64_t)rdma_req.req.data == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.num_sge == 7);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
+ CU_ASSERT((uint64_t)rdma_req.req.buffers[0] == 0x2000);
+
+ for (i = 0; i < 3; ++i) {
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == 0x2000 + i * (data_bs + md_size));
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
+ }
+ CU_ASSERT(rdma_req.data.wr.sg_list[3].addr == 0x2000 + 3 * (data_bs + md_size));
+ CU_ASSERT(rdma_req.data.wr.sg_list[3].length == 488);
+ CU_ASSERT(rdma_req.data.wr.sg_list[3].lkey == g_rdma_mr.lkey);
+
+ /* 2nd IO buffer consumed */
+ CU_ASSERT(rdma_req.data.wr.sg_list[4].addr == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.sg_list[4].length == 24);
+ CU_ASSERT(rdma_req.data.wr.sg_list[4].lkey == g_rdma_mr.lkey);
+
+ CU_ASSERT(rdma_req.data.wr.sg_list[5].addr == 0x2000 + 24 + md_size);
+ CU_ASSERT(rdma_req.data.wr.sg_list[5].length == 512);
+ CU_ASSERT(rdma_req.data.wr.sg_list[5].lkey == g_rdma_mr.lkey);
+
+ CU_ASSERT(rdma_req.data.wr.sg_list[6].addr == 0x2000 + 24 + 512 + md_size * 2);
+ CU_ASSERT(rdma_req.data.wr.sg_list[6].length == 512);
+ CU_ASSERT(rdma_req.data.wr.sg_list[6].lkey == g_rdma_mr.lkey);
+
+	/* Part 7: simple I/O, the number of SGL entries exceeds the number of entries
+	   one WR can hold. An additional WR is chained */
+ MOCK_SET(spdk_mempool_get, &data);
+ aligned_buffer = (void *)((uintptr_t)((char *)&data + NVMF_DATA_BUFFER_MASK) &
+ ~NVMF_DATA_BUFFER_MASK);
+ reset_nvmf_rdma_request(&rdma_req);
+ spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
+ SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
+ 0, 0, 0, 0, 0);
+ rdma_req.req.dif.dif_insert_or_strip = true;
+ rtransport.transport.opts.io_unit_size = data_bs * 16;
+ sgl->keyed.length = data_bs * 16;
+
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == data_bs * 16);
+ CU_ASSERT(rdma_req.req.iovcnt == 2);
+ CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
+ CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 16);
+ CU_ASSERT(rdma_req.req.data == aligned_buffer);
+ CU_ASSERT(rdma_req.data.wr.num_sge == 16);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
+ /* additional wr from pool */
+ CU_ASSERT(rdma_req.data.wr.next == (void *)&data.wr);
+ CU_ASSERT(rdma_req.data.wr.next->num_sge == 1);
+ CU_ASSERT(rdma_req.data.wr.next->next == &rdma_req.rsp.wr);
+
+	/* Part 8: simple I/O, data with metadata does not fit into one io buffer */
+ MOCK_SET(spdk_mempool_get, (void *)0x2000);
+ reset_nvmf_rdma_request(&rdma_req);
+ spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
+ SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
+ 0, 0, 0, 0, 0);
+ rdma_req.req.dif.dif_insert_or_strip = true;
+ rtransport.transport.opts.io_unit_size = 516;
+ sgl->keyed.length = data_bs * 2;
+
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == data_bs * 2);
+ CU_ASSERT(rdma_req.req.iovcnt == 3);
+ CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
+ CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 2);
+ CU_ASSERT(rdma_req.req.data == (void *)0x2000);
+ CU_ASSERT(rdma_req.data.wr.num_sge == 2);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
+
+ CU_ASSERT(rdma_req.data.wr.sg_list[0].addr == 0x2000);
+ CU_ASSERT(rdma_req.data.wr.sg_list[0].length == 512);
+ CU_ASSERT(rdma_req.data.wr.sg_list[0].lkey == g_rdma_mr.lkey);
+
+	/* 2nd IO buffer consumed; the data starts 4 bytes in because part of the
+	   metadata landed at the beginning of that buffer */
+ CU_ASSERT(rdma_req.data.wr.sg_list[1].addr == 0x2000 + 4);
+ CU_ASSERT(rdma_req.data.wr.sg_list[1].length == 512);
+ CU_ASSERT(rdma_req.data.wr.sg_list[1].lkey == g_rdma_mr.lkey);
+
+	/* Test 9: a buffer split across two Memory Regions */
+ MOCK_SET(spdk_mempool_get, (void *)&buffer);
+ reset_nvmf_rdma_request(&rdma_req);
+ spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
+ SPDK_DIF_TYPE1, SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK,
+ 0, 0, 0, 0, 0);
+ rdma_req.req.dif.dif_insert_or_strip = true;
+ rtransport.transport.opts.io_unit_size = data_bs * 4;
+ sgl->keyed.length = data_bs * 2;
+ g_mr_size = data_bs;
+ g_mr_next_size = rtransport.transport.opts.io_unit_size;
+
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == rtransport.transport.opts.io_unit_size / 2);
+ CU_ASSERT((uint64_t)rdma_req.req.data == (((uint64_t)&buffer + NVMF_DATA_BUFFER_MASK) &
+ ~NVMF_DATA_BUFFER_MASK));
+ CU_ASSERT(rdma_req.data.wr.num_sge == 2);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0xEEEE);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0xFFFF);
+ CU_ASSERT(rdma_req.req.buffers[0] == &buffer);
+ for (i = 0; i < 2; i++) {
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (uint64_t)rdma_req.req.data + i *
+ (data_bs + md_size));
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].lkey == g_rdma_mr.lkey);
+ }
+ buffer_ptr = STAILQ_FIRST(&group.retired_bufs);
+ CU_ASSERT(buffer_ptr == &buffer);
+ STAILQ_REMOVE(&group.retired_bufs, buffer_ptr, spdk_nvmf_transport_pg_cache_buf, link);
+ CU_ASSERT(STAILQ_EMPTY(&group.retired_bufs));
+ g_mr_size = 0;
+ g_mr_next_size = 0;
+
+ /* Test 2: Multi SGL */
+ sgl->generic.type = SPDK_NVME_SGL_TYPE_LAST_SEGMENT;
+ sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_OFFSET;
+ sgl->address = 0;
+ rdma_req.recv->buf = (void *)&sgl_desc;
+ MOCK_SET(spdk_mempool_get, &data);
+ aligned_buffer = (void *)((uintptr_t)((char *)&data + NVMF_DATA_BUFFER_MASK) &
+ ~NVMF_DATA_BUFFER_MASK);
+
+ /* part 1: 2 segments each with 1 wr. io_unit_size is aligned with data_bs + md_size */
+ reset_nvmf_rdma_request(&rdma_req);
+ spdk_dif_ctx_init(&rdma_req.req.dif.dif_ctx, data_bs + md_size, md_size, true, false,
+ SPDK_DIF_TYPE1,
+ SPDK_DIF_FLAGS_GUARD_CHECK | SPDK_DIF_FLAGS_REFTAG_CHECK, 0, 0, 0, 0, 0);
+ rdma_req.req.dif.dif_insert_or_strip = true;
+ rtransport.transport.opts.io_unit_size = (data_bs + md_size) * 4;
+ sgl->unkeyed.length = 2 * sizeof(struct spdk_nvme_sgl_descriptor);
+
+ for (i = 0; i < 2; i++) {
+ sgl_desc[i].keyed.type = SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK;
+ sgl_desc[i].keyed.subtype = SPDK_NVME_SGL_SUBTYPE_ADDRESS;
+ sgl_desc[i].keyed.length = data_bs * 4;
+ sgl_desc[i].address = 0x4000 + i * data_bs * 4;
+ sgl_desc[i].keyed.key = 0x44;
+ }
+
+ rc = nvmf_rdma_request_parse_sgl(&rtransport, &device, &rdma_req);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(rdma_req.req.data_from_pool == true);
+ CU_ASSERT(rdma_req.req.length == data_bs * 4 * 2);
+ CU_ASSERT(rdma_req.req.dif.orig_length == rdma_req.req.length);
+ CU_ASSERT(rdma_req.req.dif.elba_length == (data_bs + md_size) * 4 * 2);
+ CU_ASSERT(rdma_req.data.wr.num_sge == 4);
+ for (i = 0; i < 4; ++i) {
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].addr == (uintptr_t)((unsigned char *)aligned_buffer) + i *
+ (data_bs + md_size));
+ CU_ASSERT(rdma_req.data.wr.sg_list[i].length == data_bs);
+ }
+
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.rkey == 0x44);
+ CU_ASSERT(rdma_req.data.wr.wr.rdma.remote_addr == 0x4000);
+ CU_ASSERT(rdma_req.data.wr.next == &data.wr);
+ CU_ASSERT(data.wr.wr.rdma.rkey == 0x44);
+ CU_ASSERT(data.wr.wr.rdma.remote_addr == 0x4000 + data_bs * 4);
+ CU_ASSERT(data.wr.num_sge == 4);
+ for (i = 0; i < 4; ++i) {
+ CU_ASSERT(data.wr.sg_list[i].addr == (uintptr_t)((unsigned char *)aligned_buffer) + i *
+ (data_bs + md_size));
+ CU_ASSERT(data.wr.sg_list[i].length == data_bs);
+ }
+
+ CU_ASSERT(data.wr.next == &rdma_req.rsp.wr);
+}
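+
+/*
+ * All of the DIF address expectations above follow from one layout rule: in
+ * the extended-LBA (interleaved) form each data block is immediately
+ * followed by its metadata, so data block i begins at byte
+ * i * (data_bs + md_size) within a buffer. A minimal sketch of that
+ * arithmetic (illustrative helper only):
+ */
+static inline uint64_t
+ut_elba_block_offset(uint64_t i, uint32_t data_bs, uint32_t md_size)
+{
+	/* e.g. with data_bs = 512 and md_size = 8, block 3 starts at 1560 */
+	return i * (data_bs + md_size);
+}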
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvmf", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_parse_sgl);
+ CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_process);
+ CU_ADD_TEST(suite, test_nvmf_rdma_get_optimal_poll_group);
+ CU_ADD_TEST(suite, test_spdk_nvmf_rdma_request_parse_sgl_with_md);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvmf/subsystem.c/.gitignore b/src/spdk/test/unit/lib/nvmf/subsystem.c/.gitignore
new file mode 100644
index 000000000..76ca0d330
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/subsystem.c/.gitignore
@@ -0,0 +1 @@
+subsystem_ut
diff --git a/src/spdk/test/unit/lib/nvmf/subsystem.c/Makefile b/src/spdk/test/unit/lib/nvmf/subsystem.c/Makefile
new file mode 100644
index 000000000..3d5fa6c8e
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/subsystem.c/Makefile
@@ -0,0 +1,39 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+SPDK_LIB_LIST = json
+TEST_FILE = subsystem_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvmf/subsystem.c/subsystem_ut.c b/src/spdk/test/unit/lib/nvmf/subsystem.c/subsystem_ut.c
new file mode 100644
index 000000000..149c22da1
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/subsystem.c/subsystem_ut.c
@@ -0,0 +1,1342 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation. All rights reserved.
+ * Copyright (c) 2019 Mellanox Technologies LTD. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "common/lib/ut_multithread.c"
+#include "spdk_cunit.h"
+#include "spdk_internal/mock.h"
+#include "spdk_internal/thread.h"
+
+#include "nvmf/subsystem.c"
+
+SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF)
+
+DEFINE_STUB(spdk_bdev_module_claim_bdev,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
+ struct spdk_bdev_module *module), 0);
+
+DEFINE_STUB_V(spdk_bdev_module_release_bdev,
+ (struct spdk_bdev *bdev));
+
+DEFINE_STUB(spdk_bdev_get_block_size, uint32_t,
+ (const struct spdk_bdev *bdev), 512);
+
+DEFINE_STUB(spdk_bdev_get_md_size, uint32_t,
+ (const struct spdk_bdev *bdev), 0);
+
+DEFINE_STUB(spdk_bdev_is_md_interleaved, bool,
+ (const struct spdk_bdev *bdev), false);
+
+DEFINE_STUB(spdk_nvmf_transport_stop_listen,
+ int,
+ (struct spdk_nvmf_transport *transport,
+ const struct spdk_nvme_transport_id *trid), 0);
+
+int
+spdk_nvmf_transport_listen(struct spdk_nvmf_transport *transport,
+ const struct spdk_nvme_transport_id *trid)
+{
+ return 0;
+}
+
+void
+nvmf_transport_listener_discover(struct spdk_nvmf_transport *transport,
+ struct spdk_nvme_transport_id *trid,
+ struct spdk_nvmf_discovery_log_page_entry *entry)
+{
+ entry->trtype = 42;
+}
+
+static struct spdk_nvmf_transport g_transport = {};
+
+struct spdk_nvmf_transport *
+spdk_nvmf_transport_create(const char *transport_name,
+ struct spdk_nvmf_transport_opts *tprt_opts)
+{
+ if (strcasecmp(transport_name, spdk_nvme_transport_id_trtype_str(SPDK_NVME_TRANSPORT_RDMA))) {
+ return &g_transport;
+ }
+
+ return NULL;
+}
+
+struct spdk_nvmf_subsystem *
+spdk_nvmf_tgt_find_subsystem(struct spdk_nvmf_tgt *tgt, const char *subnqn)
+{
+ return NULL;
+}
+
+struct spdk_nvmf_transport *
+spdk_nvmf_tgt_get_transport(struct spdk_nvmf_tgt *tgt, const char *transport_name)
+{
+ if (strncmp(transport_name, SPDK_NVME_TRANSPORT_NAME_RDMA, SPDK_NVMF_TRSTRING_MAX_LEN)) {
+ return &g_transport;
+ }
+
+ return NULL;
+}
+
+int
+nvmf_poll_group_update_subsystem(struct spdk_nvmf_poll_group *group,
+ struct spdk_nvmf_subsystem *subsystem)
+{
+ return 0;
+}
+
+int
+nvmf_poll_group_add_subsystem(struct spdk_nvmf_poll_group *group,
+ struct spdk_nvmf_subsystem *subsystem,
+ spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
+{
+ return 0;
+}
+
+void
+nvmf_poll_group_remove_subsystem(struct spdk_nvmf_poll_group *group,
+ struct spdk_nvmf_subsystem *subsystem,
+ spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
+{
+}
+
+void
+nvmf_poll_group_pause_subsystem(struct spdk_nvmf_poll_group *group,
+ struct spdk_nvmf_subsystem *subsystem,
+ spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
+{
+}
+
+void
+nvmf_poll_group_resume_subsystem(struct spdk_nvmf_poll_group *group,
+ struct spdk_nvmf_subsystem *subsystem,
+ spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
+{
+}
+
+int
+spdk_nvme_transport_id_parse_trtype(enum spdk_nvme_transport_type *trtype, const char *str)
+{
+ if (trtype == NULL || str == NULL) {
+ return -EINVAL;
+ }
+
+ if (strcasecmp(str, "PCIe") == 0) {
+ *trtype = SPDK_NVME_TRANSPORT_PCIE;
+ } else if (strcasecmp(str, "RDMA") == 0) {
+ *trtype = SPDK_NVME_TRANSPORT_RDMA;
+ } else {
+ return -ENOENT;
+ }
+ return 0;
+}
+
+int
+spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
+ const struct spdk_nvme_transport_id *trid2)
+{
+ return 0;
+}
+
+int32_t
+spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return -1;
+}
+
+int32_t
+spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
+{
+ return -1;
+}
+
+int
+spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return -1;
+}
+
+void
+nvmf_ctrlr_destruct(struct spdk_nvmf_ctrlr *ctrlr)
+{
+}
+
+static struct spdk_nvmf_ctrlr *g_ns_changed_ctrlr = NULL;
+static uint32_t g_ns_changed_nsid = 0;
+void
+nvmf_ctrlr_ns_changed(struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid)
+{
+ g_ns_changed_ctrlr = ctrlr;
+ g_ns_changed_nsid = nsid;
+}
+
+int
+spdk_bdev_open_ext(const char *bdev_name, bool write, spdk_bdev_event_cb_t event_cb,
+ void *event_ctx, struct spdk_bdev_desc **_desc)
+{
+ return 0;
+}
+
+void
+spdk_bdev_close(struct spdk_bdev_desc *desc)
+{
+}
+
+const char *
+spdk_bdev_get_name(const struct spdk_bdev *bdev)
+{
+ return "test";
+}
+
+const struct spdk_uuid *
+spdk_bdev_get_uuid(const struct spdk_bdev *bdev)
+{
+ return &bdev->uuid;
+}
+
+static void
+test_spdk_nvmf_subsystem_add_ns(void)
+{
+ struct spdk_nvmf_tgt tgt = {};
+ struct spdk_nvmf_subsystem subsystem = {
+ .max_nsid = 0,
+ .ns = NULL,
+ .tgt = &tgt
+ };
+ struct spdk_bdev bdev1 = {}, bdev2 = {};
+ struct spdk_nvmf_ns_opts ns_opts;
+ uint32_t nsid;
+ int rc;
+
+ tgt.max_subsystems = 1024;
+ tgt.subsystems = calloc(tgt.max_subsystems, sizeof(struct spdk_nvmf_subsystem *));
+ SPDK_CU_ASSERT_FATAL(tgt.subsystems != NULL);
+
+ /* Allow NSID to be assigned automatically */
+ spdk_nvmf_ns_opts_get_defaults(&ns_opts, sizeof(ns_opts));
+ nsid = spdk_nvmf_subsystem_add_ns(&subsystem, &bdev1, &ns_opts, sizeof(ns_opts), NULL);
+ /* NSID 1 is the first unused ID */
+ CU_ASSERT(nsid == 1);
+ CU_ASSERT(subsystem.max_nsid == 1);
+ SPDK_CU_ASSERT_FATAL(subsystem.ns != NULL);
+ SPDK_CU_ASSERT_FATAL(subsystem.ns[nsid - 1] != NULL);
+ CU_ASSERT(subsystem.ns[nsid - 1]->bdev == &bdev1);
+
+ /* Request a specific NSID */
+ spdk_nvmf_ns_opts_get_defaults(&ns_opts, sizeof(ns_opts));
+ ns_opts.nsid = 5;
+ nsid = spdk_nvmf_subsystem_add_ns(&subsystem, &bdev2, &ns_opts, sizeof(ns_opts), NULL);
+ CU_ASSERT(nsid == 5);
+ CU_ASSERT(subsystem.max_nsid == 5);
+ SPDK_CU_ASSERT_FATAL(subsystem.ns[nsid - 1] != NULL);
+ CU_ASSERT(subsystem.ns[nsid - 1]->bdev == &bdev2);
+
+ /* Request an NSID that is already in use */
+ spdk_nvmf_ns_opts_get_defaults(&ns_opts, sizeof(ns_opts));
+ ns_opts.nsid = 5;
+ nsid = spdk_nvmf_subsystem_add_ns(&subsystem, &bdev2, &ns_opts, sizeof(ns_opts), NULL);
+ CU_ASSERT(nsid == 0);
+ CU_ASSERT(subsystem.max_nsid == 5);
+
+ /* Request 0xFFFFFFFF (invalid NSID, reserved for broadcast) */
+ spdk_nvmf_ns_opts_get_defaults(&ns_opts, sizeof(ns_opts));
+ ns_opts.nsid = 0xFFFFFFFF;
+ nsid = spdk_nvmf_subsystem_add_ns(&subsystem, &bdev2, &ns_opts, sizeof(ns_opts), NULL);
+ CU_ASSERT(nsid == 0);
+ CU_ASSERT(subsystem.max_nsid == 5);
+
+ rc = spdk_nvmf_subsystem_remove_ns(&subsystem, 1);
+ CU_ASSERT(rc == 0);
+ rc = spdk_nvmf_subsystem_remove_ns(&subsystem, 5);
+ CU_ASSERT(rc == 0);
+
+ free(subsystem.ns);
+ free(tgt.subsystems);
+}
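+
+/*
+ * The ns[nsid - 1] accesses above rely on a simple invariant: NSIDs are
+ * 1-based while subsystem->ns is a 0-based array sized by max_nsid, so a
+ * namespace lookup is one bounds check plus one index. A hedged sketch of
+ * that lookup (illustrative; it mirrors the accesses in the test rather
+ * than quoting subsystem.c):
+ */
+static struct spdk_nvmf_ns *
+ut_lookup_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
+{
+	/* NSID 0 is always invalid; anything above max_nsid (including the
+	 * 0xFFFFFFFF broadcast value rejected above) misses */
+	if (nsid == 0 || nsid > subsystem->max_nsid) {
+		return NULL;
+	}
+	return subsystem->ns[nsid - 1];
+}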
+
+static void
+nvmf_test_create_subsystem(void)
+{
+ struct spdk_nvmf_tgt tgt = {};
+ char nqn[256];
+ struct spdk_nvmf_subsystem *subsystem;
+
+ tgt.max_subsystems = 1024;
+ tgt.subsystems = calloc(tgt.max_subsystems, sizeof(struct spdk_nvmf_subsystem *));
+ SPDK_CU_ASSERT_FATAL(tgt.subsystems != NULL);
+
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:subsystem1");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem != NULL);
+ CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
+ spdk_nvmf_subsystem_destroy(subsystem);
+
+ /* valid name with complex reverse domain */
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk-full--rev-domain.name:subsystem1");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem != NULL);
+ CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
+ spdk_nvmf_subsystem_destroy(subsystem);
+
+ /* Valid name discovery controller */
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:subsystem1");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem != NULL);
+ CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
+ spdk_nvmf_subsystem_destroy(subsystem);
+
+
+	/* Invalid name, no user-supplied string */
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem == NULL);
+
+ /* Valid name, only contains top-level domain name */
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:subsystem1");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem != NULL);
+ CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
+ spdk_nvmf_subsystem_destroy(subsystem);
+
+ /* Invalid name, domain label > 63 characters */
+ snprintf(nqn, sizeof(nqn),
+ "nqn.2016-06.io.abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz:sub");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem == NULL);
+
+ /* Invalid name, domain label starts with digit */
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.3spdk:sub");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem == NULL);
+
+ /* Invalid name, domain label starts with - */
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.-spdk:subsystem1");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem == NULL);
+
+ /* Invalid name, domain label ends with - */
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk-:subsystem1");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem == NULL);
+
+ /* Invalid name, domain label with multiple consecutive periods */
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io..spdk:subsystem1");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem == NULL);
+
+ /* Longest valid name */
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:");
+ memset(nqn + strlen(nqn), 'a', 223 - strlen(nqn));
+ nqn[223] = '\0';
+ CU_ASSERT(strlen(nqn) == 223);
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem != NULL);
+ CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
+ spdk_nvmf_subsystem_destroy(subsystem);
+
+ /* Invalid name, too long */
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:");
+ memset(nqn + strlen(nqn), 'a', 224 - strlen(nqn));
+ nqn[224] = '\0';
+ CU_ASSERT(strlen(nqn) == 224);
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ CU_ASSERT(subsystem == NULL);
+
+ /* Valid name using uuid format */
+ snprintf(nqn, sizeof(nqn), "nqn.2014-08.org.nvmexpress:uuid:11111111-aaaa-bbdd-FFEE-123456789abc");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem != NULL);
+ CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
+ spdk_nvmf_subsystem_destroy(subsystem);
+
+	/* Invalid name, user-supplied string contains an invalid UTF-8 character */
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:\xFFsubsystem1");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem == NULL);
+
+	/* Valid name with non-ASCII but valid UTF-8 characters */
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:\xe1\x8a\x88subsystem1\xca\x80");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem != NULL);
+ CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
+ spdk_nvmf_subsystem_destroy(subsystem);
+
+ /* Invalid uuid (too long) */
+ snprintf(nqn, sizeof(nqn),
+ "nqn.2014-08.org.nvmexpress:uuid:11111111-aaaa-bbdd-FFEE-123456789abcdef");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem == NULL);
+
+ /* Invalid uuid (dashes placed incorrectly) */
+ snprintf(nqn, sizeof(nqn), "nqn.2014-08.org.nvmexpress:uuid:111111-11aaaa-bbdd-FFEE-123456789abc");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem == NULL);
+
+ /* Invalid uuid (invalid characters in uuid) */
+ snprintf(nqn, sizeof(nqn), "nqn.2014-08.org.nvmexpress:uuid:111hg111-aaaa-bbdd-FFEE-123456789abc");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem == NULL);
+
+ free(tgt.subsystems);
+}
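+
+/*
+ * The cases above encode the NQN grammar from the NVMe-oF spec:
+ * "nqn.yyyy-mm.<reverse-domain>:<user-string>" in valid UTF-8, at most 223
+ * bytes long, each domain label up to 63 characters, starting with a letter
+ * and neither starting nor ending with '-'; the
+ * "nqn.2014-08.org.nvmexpress:uuid:<uuid>" form is also accepted. A sketch
+ * of just the length check, assuming SPDK_NVMF_NQN_MAX_LEN carries the
+ * 223-byte spec limit (the full validator also walks labels and UTF-8):
+ */
+static bool
+ut_nqn_length_ok(const char *nqn)
+{
+	size_t len = strlen(nqn);
+
+	/* the longest-valid/too-long pair of cases above sits exactly on
+	 * this boundary */
+	return len > 0 && len <= SPDK_NVMF_NQN_MAX_LEN;
+}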
+
+static void
+test_spdk_nvmf_subsystem_set_sn(void)
+{
+ struct spdk_nvmf_subsystem subsystem = {};
+
+ /* Basic valid serial number */
+ CU_ASSERT(spdk_nvmf_subsystem_set_sn(&subsystem, "abcd xyz") == 0);
+ CU_ASSERT(strcmp(subsystem.sn, "abcd xyz") == 0);
+
+ /* Exactly 20 characters (valid) */
+ CU_ASSERT(spdk_nvmf_subsystem_set_sn(&subsystem, "12345678901234567890") == 0);
+ CU_ASSERT(strcmp(subsystem.sn, "12345678901234567890") == 0);
+
+ /* 21 characters (too long, invalid) */
+ CU_ASSERT(spdk_nvmf_subsystem_set_sn(&subsystem, "123456789012345678901") < 0);
+
+ /* Non-ASCII characters (invalid) */
+ CU_ASSERT(spdk_nvmf_subsystem_set_sn(&subsystem, "abcd\txyz") < 0);
+}
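+
+/*
+ * The serial number rules exercised above match the NVMe Identify
+ * Controller SN field: 1 to SPDK_NVME_CTRLR_SN_LEN (20) characters of
+ * printable ASCII (0x20-0x7E), which is why the embedded tab is rejected.
+ * A minimal sketch of that predicate (illustrative only):
+ */
+static bool
+ut_sn_ok(const char *sn)
+{
+	size_t len = strlen(sn);
+	size_t i;
+
+	if (len == 0 || len > SPDK_NVME_CTRLR_SN_LEN) {
+		return false;
+	}
+	for (i = 0; i < len; i++) {
+		/* printable ASCII only; space is allowed */
+		if (sn[i] < 0x20 || sn[i] > 0x7E) {
+			return false;
+		}
+	}
+	return true;
+}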
+
+/*
+ * Reservation Unit Test Configuration
+ * -------- -------- --------
+ * | Host A | | Host B | | Host C |
+ * -------- -------- --------
+ * / \ | |
+ * -------- -------- ------- -------
+ * |Ctrlr1_A| |Ctrlr2_A| |Ctrlr_B| |Ctrlr_C|
+ * -------- -------- ------- -------
+ * \ \ / /
+ * \ \ / /
+ * \ \ / /
+ * --------------------------------------
+ * | NAMESPACE 1 |
+ * --------------------------------------
+ */
+static struct spdk_nvmf_subsystem g_subsystem;
+static struct spdk_nvmf_ctrlr g_ctrlr1_A, g_ctrlr2_A, g_ctrlr_B, g_ctrlr_C;
+static struct spdk_nvmf_ns g_ns;
+static struct spdk_bdev g_bdev;
+struct spdk_nvmf_subsystem_pg_ns_info g_ns_info;
+
+void
+nvmf_ctrlr_async_event_reservation_notification(struct spdk_nvmf_ctrlr *ctrlr)
+{
+}
+
+static void
+ut_reservation_init(void)
+{
+ TAILQ_INIT(&g_subsystem.ctrlrs);
+
+ memset(&g_ns, 0, sizeof(g_ns));
+ TAILQ_INIT(&g_ns.registrants);
+ g_ns.subsystem = &g_subsystem;
+ g_ns.ptpl_file = NULL;
+ g_ns.ptpl_activated = false;
+ spdk_uuid_generate(&g_bdev.uuid);
+ g_ns.bdev = &g_bdev;
+
+ /* Host A has two controllers */
+ spdk_uuid_generate(&g_ctrlr1_A.hostid);
+ TAILQ_INIT(&g_ctrlr1_A.log_head);
+ g_ctrlr1_A.subsys = &g_subsystem;
+ g_ctrlr1_A.num_avail_log_pages = 0;
+ TAILQ_INSERT_TAIL(&g_subsystem.ctrlrs, &g_ctrlr1_A, link);
+ spdk_uuid_copy(&g_ctrlr2_A.hostid, &g_ctrlr1_A.hostid);
+ TAILQ_INIT(&g_ctrlr2_A.log_head);
+ g_ctrlr2_A.subsys = &g_subsystem;
+ g_ctrlr2_A.num_avail_log_pages = 0;
+ TAILQ_INSERT_TAIL(&g_subsystem.ctrlrs, &g_ctrlr2_A, link);
+
+ /* Host B has 1 controller */
+ spdk_uuid_generate(&g_ctrlr_B.hostid);
+ TAILQ_INIT(&g_ctrlr_B.log_head);
+ g_ctrlr_B.subsys = &g_subsystem;
+ g_ctrlr_B.num_avail_log_pages = 0;
+ TAILQ_INSERT_TAIL(&g_subsystem.ctrlrs, &g_ctrlr_B, link);
+
+ /* Host C has 1 controller */
+ spdk_uuid_generate(&g_ctrlr_C.hostid);
+ TAILQ_INIT(&g_ctrlr_C.log_head);
+ g_ctrlr_C.subsys = &g_subsystem;
+ g_ctrlr_C.num_avail_log_pages = 0;
+ TAILQ_INSERT_TAIL(&g_subsystem.ctrlrs, &g_ctrlr_C, link);
+}
+
+static void
+ut_reservation_deinit(void)
+{
+ struct spdk_nvmf_registrant *reg, *tmp;
+ struct spdk_nvmf_reservation_log *log, *log_tmp;
+ struct spdk_nvmf_ctrlr *ctrlr, *ctrlr_tmp;
+
+ TAILQ_FOREACH_SAFE(reg, &g_ns.registrants, link, tmp) {
+ TAILQ_REMOVE(&g_ns.registrants, reg, link);
+ free(reg);
+ }
+ TAILQ_FOREACH_SAFE(log, &g_ctrlr1_A.log_head, link, log_tmp) {
+ TAILQ_REMOVE(&g_ctrlr1_A.log_head, log, link);
+ free(log);
+ }
+ g_ctrlr1_A.num_avail_log_pages = 0;
+ TAILQ_FOREACH_SAFE(log, &g_ctrlr2_A.log_head, link, log_tmp) {
+ TAILQ_REMOVE(&g_ctrlr2_A.log_head, log, link);
+ free(log);
+ }
+ g_ctrlr2_A.num_avail_log_pages = 0;
+ TAILQ_FOREACH_SAFE(log, &g_ctrlr_B.log_head, link, log_tmp) {
+ TAILQ_REMOVE(&g_ctrlr_B.log_head, log, link);
+ free(log);
+ }
+ g_ctrlr_B.num_avail_log_pages = 0;
+ TAILQ_FOREACH_SAFE(log, &g_ctrlr_C.log_head, link, log_tmp) {
+ TAILQ_REMOVE(&g_ctrlr_C.log_head, log, link);
+ free(log);
+ }
+ g_ctrlr_C.num_avail_log_pages = 0;
+
+ TAILQ_FOREACH_SAFE(ctrlr, &g_subsystem.ctrlrs, link, ctrlr_tmp) {
+ TAILQ_REMOVE(&g_subsystem.ctrlrs, ctrlr, link);
+ }
+}
+
+static struct spdk_nvmf_request *
+ut_reservation_build_req(uint32_t length)
+{
+ struct spdk_nvmf_request *req;
+
+ req = calloc(1, sizeof(*req));
+ assert(req != NULL);
+
+ req->data = calloc(1, length);
+ assert(req->data != NULL);
+ req->length = length;
+
+ req->cmd = (union nvmf_h2c_msg *)calloc(1, sizeof(union nvmf_h2c_msg));
+ assert(req->cmd != NULL);
+
+ req->rsp = (union nvmf_c2h_msg *)calloc(1, sizeof(union nvmf_c2h_msg));
+ assert(req->rsp != NULL);
+
+ return req;
+}
+
+static void
+ut_reservation_free_req(struct spdk_nvmf_request *req)
+{
+ free(req->cmd);
+ free(req->rsp);
+ free(req->data);
+ free(req);
+}
+
+static void
+ut_reservation_build_register_request(struct spdk_nvmf_request *req,
+ uint8_t rrega, uint8_t iekey,
+ uint8_t cptpl, uint64_t crkey,
+ uint64_t nrkey)
+{
+ struct spdk_nvme_reservation_register_data key;
+ struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
+
+ key.crkey = crkey;
+ key.nrkey = nrkey;
+ cmd->cdw10 = 0;
+ cmd->cdw10_bits.resv_register.rrega = rrega;
+ cmd->cdw10_bits.resv_register.iekey = iekey;
+ cmd->cdw10_bits.resv_register.cptpl = cptpl;
+ memcpy(req->data, &key, sizeof(key));
+}
+
+static void
+ut_reservation_build_acquire_request(struct spdk_nvmf_request *req,
+ uint8_t racqa, uint8_t iekey,
+ uint8_t rtype, uint64_t crkey,
+ uint64_t prkey)
+{
+ struct spdk_nvme_reservation_acquire_data key;
+ struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
+
+ key.crkey = crkey;
+ key.prkey = prkey;
+ cmd->cdw10 = 0;
+ cmd->cdw10_bits.resv_acquire.racqa = racqa;
+ cmd->cdw10_bits.resv_acquire.iekey = iekey;
+ cmd->cdw10_bits.resv_acquire.rtype = rtype;
+ memcpy(req->data, &key, sizeof(key));
+}
+
+static void
+ut_reservation_build_release_request(struct spdk_nvmf_request *req,
+ uint8_t rrela, uint8_t iekey,
+ uint8_t rtype, uint64_t crkey)
+{
+ struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
+
+ cmd->cdw10 = 0;
+ cmd->cdw10_bits.resv_release.rrela = rrela;
+ cmd->cdw10_bits.resv_release.iekey = iekey;
+ cmd->cdw10_bits.resv_release.rtype = rtype;
+ memcpy(req->data, &crkey, sizeof(crkey));
+}
+
+/*
+ * Construct four registrants for other test cases.
+ *
+ * g_ctrlr1_A registers with key 0xa1.
+ * g_ctrlr2_A registers with key 0xa1 (it shares Host A's Host Identifier).
+ * g_ctrlr_B registers with key 0xb1.
+ * g_ctrlr_C registers with key 0xc1.
+ */
+static void
+ut_reservation_build_registrants(void)
+{
+ struct spdk_nvmf_request *req;
+ struct spdk_nvme_cpl *rsp;
+ struct spdk_nvmf_registrant *reg;
+ uint32_t gen;
+
+ req = ut_reservation_build_req(16);
+	SPDK_CU_ASSERT_FATAL(req != NULL);
+	rsp = &req->rsp->nvme_cpl;
+ gen = g_ns.gen;
+
+	/* TEST CASE: g_ctrlr1_A registers with a new key */
+ ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY,
+ 0, 0, 0, 0xa1);
+ nvmf_ns_reservation_register(&g_ns, &g_ctrlr1_A, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
+ SPDK_CU_ASSERT_FATAL(reg->rkey == 0xa1);
+ SPDK_CU_ASSERT_FATAL(g_ns.gen == gen + 1);
+
+	/* TEST CASE: g_ctrlr2_A registers with a new key. Because it shares a
+	 * Host Identifier with g_ctrlr1_A, the registered key must stay 0xa1,
+	 * so registering a different key fails.
+	 */
+ ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY,
+ 0, 0, 0, 0xa2);
+ nvmf_ns_reservation_register(&g_ns, &g_ctrlr2_A, req);
+	/* Reservation conflict for any key other than 0xa1 */
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_RESERVATION_CONFLICT);
+
+	/* g_ctrlr_B registers with a new key */
+ ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY,
+ 0, 0, 0, 0xb1);
+ nvmf_ns_reservation_register(&g_ns, &g_ctrlr_B, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_B.hostid);
+ SPDK_CU_ASSERT_FATAL(reg->rkey == 0xb1);
+ SPDK_CU_ASSERT_FATAL(g_ns.gen == gen + 2);
+
+	/* g_ctrlr_C registers with a new key */
+ ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY,
+ 0, 0, 0, 0xc1);
+ nvmf_ns_reservation_register(&g_ns, &g_ctrlr_C, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_C.hostid);
+ SPDK_CU_ASSERT_FATAL(reg->rkey == 0xc1);
+ SPDK_CU_ASSERT_FATAL(g_ns.gen == gen + 3);
+
+ ut_reservation_free_req(req);
+}
+
+static void
+test_reservation_register(void)
+{
+ struct spdk_nvmf_request *req;
+ struct spdk_nvme_cpl *rsp;
+ struct spdk_nvmf_registrant *reg;
+ uint32_t gen;
+
+ ut_reservation_init();
+
+ req = ut_reservation_build_req(16);
+	SPDK_CU_ASSERT_FATAL(req != NULL);
+	rsp = &req->rsp->nvme_cpl;
+
+ ut_reservation_build_registrants();
+
+ /* TEST CASE: Replace g_ctrlr1_A with a new key */
+ ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REPLACE_KEY,
+ 0, 0, 0xa1, 0xa11);
+ nvmf_ns_reservation_register(&g_ns, &g_ctrlr1_A, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
+ SPDK_CU_ASSERT_FATAL(reg->rkey == 0xa11);
+
+	/* TEST CASE: Host A with g_ctrlr1_A gets the reservation with
+ * type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE
+ */
+ ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE, 0xa11, 0x0);
+ gen = g_ns.gen;
+ nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr1_A, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE);
+ SPDK_CU_ASSERT_FATAL(g_ns.crkey == 0xa11);
+ SPDK_CU_ASSERT_FATAL(g_ns.holder == reg);
+ SPDK_CU_ASSERT_FATAL(g_ns.gen == gen);
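+	/* A successful acquire updates rtype/crkey/holder but does not bump the
+	 * generation counter.
+	 */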
+
+	/* TEST CASE: g_ctrlr_C unregisters with IEKEY enabled */
+ ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_UNREGISTER_KEY,
+ 1, 0, 0, 0);
+ nvmf_ns_reservation_register(&g_ns, &g_ctrlr_C, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_C.hostid);
+ SPDK_CU_ASSERT_FATAL(reg == NULL);
+
+	/* TEST CASE: g_ctrlr_B unregisters with the correct key */
+ ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_UNREGISTER_KEY,
+ 0, 0, 0xb1, 0);
+ nvmf_ns_reservation_register(&g_ns, &g_ctrlr_B, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_B.hostid);
+ SPDK_CU_ASSERT_FATAL(reg == NULL);
+
+	/* TEST CASE: g_ctrlr1_A unregisters with the correct key; since it is
+	 * the reservation holder, the reservation is removed as well.
+ */
+ ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_UNREGISTER_KEY,
+ 0, 0, 0xa11, 0);
+ nvmf_ns_reservation_register(&g_ns, &g_ctrlr1_A, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
+ SPDK_CU_ASSERT_FATAL(reg == NULL);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == 0);
+ SPDK_CU_ASSERT_FATAL(g_ns.crkey == 0);
+ SPDK_CU_ASSERT_FATAL(g_ns.holder == NULL);
+
+ ut_reservation_free_req(req);
+ ut_reservation_deinit();
+}
+
+static void
+test_reservation_register_with_ptpl(void)
+{
+ struct spdk_nvmf_request *req;
+ struct spdk_nvme_cpl *rsp;
+ struct spdk_nvmf_registrant *reg;
+ bool update_sgroup = false;
+ int rc;
+ struct spdk_nvmf_reservation_info info;
+
+ ut_reservation_init();
+
+	req = ut_reservation_build_req(16);
+	SPDK_CU_ASSERT_FATAL(req != NULL);
+	rsp = &req->rsp->nvme_cpl;
+
+	/* TEST CASE: Without a persistent file, registering with PTPL enabled fails */
+ g_ns.ptpl_file = NULL;
+ ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY, 0,
+ SPDK_NVME_RESERVE_PTPL_PERSIST_POWER_LOSS, 0, 0xa1);
+ update_sgroup = nvmf_ns_reservation_register(&g_ns, &g_ctrlr1_A, req);
+ SPDK_CU_ASSERT_FATAL(update_sgroup == false);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc != SPDK_NVME_SC_SUCCESS);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
+ SPDK_CU_ASSERT_FATAL(reg == NULL);
+
+ /* TEST CASE: Enable PTPL */
+ g_ns.ptpl_file = "/tmp/Ns1PR.cfg";
+ ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY, 0,
+ SPDK_NVME_RESERVE_PTPL_PERSIST_POWER_LOSS, 0, 0xa1);
+ update_sgroup = nvmf_ns_reservation_register(&g_ns, &g_ctrlr1_A, req);
+ SPDK_CU_ASSERT_FATAL(update_sgroup == true);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ SPDK_CU_ASSERT_FATAL(g_ns.ptpl_activated == true);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
+ SPDK_CU_ASSERT_FATAL(reg != NULL);
+ SPDK_CU_ASSERT_FATAL(!spdk_uuid_compare(&g_ctrlr1_A.hostid, &reg->hostid));
+ /* Load reservation information from configuration file */
+ memset(&info, 0, sizeof(info));
+ rc = nvmf_ns_load_reservation(g_ns.ptpl_file, &info);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(info.ptpl_activated == true);
+
+ /* TEST CASE: Disable PTPL */
+ rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
+ ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY, 0,
+ SPDK_NVME_RESERVE_PTPL_CLEAR_POWER_ON, 0, 0xa1);
+ update_sgroup = nvmf_ns_reservation_register(&g_ns, &g_ctrlr1_A, req);
+ SPDK_CU_ASSERT_FATAL(update_sgroup == true);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ SPDK_CU_ASSERT_FATAL(g_ns.ptpl_activated == false);
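+	/* With PTPL disabled, the persisted reservation information is invalidated,
+	 * so reloading it from the configuration file is expected to fail.
+	 */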
+ rc = nvmf_ns_load_reservation(g_ns.ptpl_file, &info);
+ SPDK_CU_ASSERT_FATAL(rc < 0);
+ unlink(g_ns.ptpl_file);
+
+ ut_reservation_free_req(req);
+ ut_reservation_deinit();
+}
+
+static void
+test_reservation_acquire_preempt_1(void)
+{
+ struct spdk_nvmf_request *req;
+ struct spdk_nvme_cpl *rsp;
+ struct spdk_nvmf_registrant *reg;
+ uint32_t gen;
+
+ ut_reservation_init();
+
+	req = ut_reservation_build_req(16);
+	SPDK_CU_ASSERT_FATAL(req != NULL);
+	rsp = &req->rsp->nvme_cpl;
+
+ ut_reservation_build_registrants();
+
+ gen = g_ns.gen;
+	/* ACQUIRE: Host A with g_ctrlr1_A acquires the reservation with
+	 * type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY.
+ */
+ ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xa1, 0x0);
+ nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr1_A, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
+ SPDK_CU_ASSERT_FATAL(g_ns.crkey == 0xa1);
+ SPDK_CU_ASSERT_FATAL(g_ns.holder == reg);
+ SPDK_CU_ASSERT_FATAL(g_ns.gen == gen);
+
+	/* TEST CASE: g_ctrlr1_A holds the reservation; g_ctrlr_B preempts it,
+	 * and the g_ctrlr1_A registrant is unregistered.
+ */
+ gen = g_ns.gen;
+ ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_PREEMPT, 0,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS, 0xb1, 0xa1);
+ nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr_B, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
+ SPDK_CU_ASSERT_FATAL(reg == NULL);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_B.hostid);
+ SPDK_CU_ASSERT_FATAL(reg != NULL);
+ SPDK_CU_ASSERT_FATAL(g_ns.holder == reg);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_C.hostid);
+ SPDK_CU_ASSERT_FATAL(reg != NULL);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
+ SPDK_CU_ASSERT_FATAL(g_ns.gen > gen);
+
+	/* TEST CASE: g_ctrlr_B holds the reservation; g_ctrlr_C preempts it
+	 * with a valid key and PRKEY set to 0, so all registrants other than
+	 * the host that issued the command are unregistered.
+ */
+ gen = g_ns.gen;
+ ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_PREEMPT, 0,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS, 0xc1, 0x0);
+ nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr_C, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr2_A.hostid);
+ SPDK_CU_ASSERT_FATAL(reg == NULL);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_B.hostid);
+ SPDK_CU_ASSERT_FATAL(reg == NULL);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_C.hostid);
+ SPDK_CU_ASSERT_FATAL(reg != NULL);
+ SPDK_CU_ASSERT_FATAL(g_ns.holder == reg);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
+ SPDK_CU_ASSERT_FATAL(g_ns.gen > gen);
+
+ ut_reservation_free_req(req);
+ ut_reservation_deinit();
+}
+
+static void
+test_reservation_acquire_release_with_ptpl(void)
+{
+ struct spdk_nvmf_request *req;
+ struct spdk_nvme_cpl *rsp;
+ struct spdk_nvmf_registrant *reg;
+ bool update_sgroup = false;
+ struct spdk_uuid holder_uuid;
+ int rc;
+ struct spdk_nvmf_reservation_info info;
+
+ ut_reservation_init();
+
+	req = ut_reservation_build_req(16);
+	SPDK_CU_ASSERT_FATAL(req != NULL);
+	rsp = &req->rsp->nvme_cpl;
+
+ /* TEST CASE: Enable PTPL */
+ g_ns.ptpl_file = "/tmp/Ns1PR.cfg";
+ ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_REGISTER_KEY, 0,
+ SPDK_NVME_RESERVE_PTPL_PERSIST_POWER_LOSS, 0, 0xa1);
+ update_sgroup = nvmf_ns_reservation_register(&g_ns, &g_ctrlr1_A, req);
+ SPDK_CU_ASSERT_FATAL(update_sgroup == true);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ SPDK_CU_ASSERT_FATAL(g_ns.ptpl_activated == true);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
+ SPDK_CU_ASSERT_FATAL(reg != NULL);
+ SPDK_CU_ASSERT_FATAL(!spdk_uuid_compare(&g_ctrlr1_A.hostid, &reg->hostid));
+ /* Load reservation information from configuration file */
+ memset(&info, 0, sizeof(info));
+ rc = nvmf_ns_load_reservation(g_ns.ptpl_file, &info);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(info.ptpl_activated == true);
+
+ /* TEST CASE: Acquire the reservation */
+ rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
+ ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xa1, 0x0);
+ update_sgroup = nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr1_A, req);
+ SPDK_CU_ASSERT_FATAL(update_sgroup == true);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ memset(&info, 0, sizeof(info));
+ rc = nvmf_ns_load_reservation(g_ns.ptpl_file, &info);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(info.ptpl_activated == true);
+ SPDK_CU_ASSERT_FATAL(info.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
+ SPDK_CU_ASSERT_FATAL(info.crkey == 0xa1);
+ spdk_uuid_parse(&holder_uuid, info.holder_uuid);
+ SPDK_CU_ASSERT_FATAL(!spdk_uuid_compare(&g_ctrlr1_A.hostid, &holder_uuid));
+
+ /* TEST CASE: Release the reservation */
+ rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
+ ut_reservation_build_release_request(req, SPDK_NVME_RESERVE_RELEASE, 0,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xa1);
+ update_sgroup = nvmf_ns_reservation_release(&g_ns, &g_ctrlr1_A, req);
+ SPDK_CU_ASSERT_FATAL(update_sgroup == true);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ memset(&info, 0, sizeof(info));
+ rc = nvmf_ns_load_reservation(g_ns.ptpl_file, &info);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(info.rtype == 0);
+ SPDK_CU_ASSERT_FATAL(info.crkey == 0);
+ SPDK_CU_ASSERT_FATAL(info.ptpl_activated == true);
+ unlink(g_ns.ptpl_file);
+
+ ut_reservation_free_req(req);
+ ut_reservation_deinit();
+}
+
+static void
+test_reservation_release(void)
+{
+ struct spdk_nvmf_request *req;
+ struct spdk_nvme_cpl *rsp;
+ struct spdk_nvmf_registrant *reg;
+
+ ut_reservation_init();
+
+	req = ut_reservation_build_req(16);
+	SPDK_CU_ASSERT_FATAL(req != NULL);
+	rsp = &req->rsp->nvme_cpl;
+
+ ut_reservation_build_registrants();
+
+	/* ACQUIRE: Host A with g_ctrlr1_A gets the reservation with
+ * type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS
+ */
+ ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS, 0xa1, 0x0);
+ nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr1_A, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
+ SPDK_CU_ASSERT_FATAL(g_ns.holder == reg);
+
+	/* Test Case: Host B releases the reservation */
+ ut_reservation_build_release_request(req, SPDK_NVME_RESERVE_RELEASE, 0,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS, 0xb1);
+ nvmf_ns_reservation_release(&g_ns, &g_ctrlr_B, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == 0);
+ SPDK_CU_ASSERT_FATAL(g_ns.crkey == 0);
+ SPDK_CU_ASSERT_FATAL(g_ns.holder == NULL);
+
+	/* Test Case: Host C clears the registrants */
+ ut_reservation_build_release_request(req, SPDK_NVME_RESERVE_CLEAR, 0,
+ 0, 0xc1);
+ nvmf_ns_reservation_release(&g_ns, &g_ctrlr_C, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr1_A.hostid);
+ SPDK_CU_ASSERT_FATAL(reg == NULL);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr2_A.hostid);
+ SPDK_CU_ASSERT_FATAL(reg == NULL);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_B.hostid);
+ SPDK_CU_ASSERT_FATAL(reg == NULL);
+ reg = nvmf_ns_reservation_get_registrant(&g_ns, &g_ctrlr_C.hostid);
+ SPDK_CU_ASSERT_FATAL(reg == NULL);
+
+ ut_reservation_free_req(req);
+ ut_reservation_deinit();
+}
+
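+/* Overridden for the notification tests below: instead of building a real
+ * log page, just count the notifications so that each test can assert on
+ * num_avail_log_pages.
+ */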
+void
+nvmf_ctrlr_reservation_notice_log(struct spdk_nvmf_ctrlr *ctrlr,
+ struct spdk_nvmf_ns *ns,
+ enum spdk_nvme_reservation_notification_log_page_type type)
+{
+ ctrlr->num_avail_log_pages++;
+}
+
+static void
+test_reservation_unregister_notification(void)
+{
+ struct spdk_nvmf_request *req;
+ struct spdk_nvme_cpl *rsp;
+
+ ut_reservation_init();
+
+ req = ut_reservation_build_req(16);
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+ rsp = &req->rsp->nvme_cpl;
+
+ ut_reservation_build_registrants();
+
+	/* ACQUIRE: Host B with g_ctrlr_B gets the reservation with
+ * type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY
+ */
+ rsp->status.sc = 0xff;
+ ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xb1, 0x0);
+ nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr_B, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
+
+	/* Test Case: g_ctrlr_B holds the reservation and unregisters its registration.
+	 * A reservation released notification is sent to g_ctrlr1_A/g_ctrlr2_A/g_ctrlr_C
+	 * only for the SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY and
+	 * SPDK_NVME_RESERVE_EXCLUSIVE_ACCESS_REG_ONLY types.
+ */
+ rsp->status.sc = 0xff;
+ g_ctrlr1_A.num_avail_log_pages = 0;
+ g_ctrlr2_A.num_avail_log_pages = 0;
+ g_ctrlr_B.num_avail_log_pages = 5;
+ g_ctrlr_C.num_avail_log_pages = 0;
+ ut_reservation_build_register_request(req, SPDK_NVME_RESERVE_UNREGISTER_KEY,
+ 0, 0, 0xb1, 0);
+ nvmf_ns_reservation_register(&g_ns, &g_ctrlr_B, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == 0);
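+	/* Only the other hosts are notified; the issuing host's counter should
+	 * stay at its preset value of 5.
+	 */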
+ SPDK_CU_ASSERT_FATAL(1 == g_ctrlr1_A.num_avail_log_pages);
+ SPDK_CU_ASSERT_FATAL(1 == g_ctrlr2_A.num_avail_log_pages);
+ SPDK_CU_ASSERT_FATAL(5 == g_ctrlr_B.num_avail_log_pages);
+ SPDK_CU_ASSERT_FATAL(1 == g_ctrlr_C.num_avail_log_pages);
+
+ ut_reservation_free_req(req);
+ ut_reservation_deinit();
+}
+
+static void
+test_reservation_release_notification(void)
+{
+ struct spdk_nvmf_request *req;
+ struct spdk_nvme_cpl *rsp;
+
+ ut_reservation_init();
+
+ req = ut_reservation_build_req(16);
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+ rsp = &req->rsp->nvme_cpl;
+
+ ut_reservation_build_registrants();
+
+	/* ACQUIRE: Host B with g_ctrlr_B gets the reservation with
+ * type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY
+ */
+ rsp->status.sc = 0xff;
+ ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xb1, 0x0);
+ nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr_B, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
+
+	/* Test Case: g_ctrlr_B holds the reservation and releases it.
+	 * A reservation released notification is sent to g_ctrlr1_A/g_ctrlr2_A/g_ctrlr_C.
+ */
+ rsp->status.sc = 0xff;
+ g_ctrlr1_A.num_avail_log_pages = 0;
+ g_ctrlr2_A.num_avail_log_pages = 0;
+ g_ctrlr_B.num_avail_log_pages = 5;
+ g_ctrlr_C.num_avail_log_pages = 0;
+ ut_reservation_build_release_request(req, SPDK_NVME_RESERVE_RELEASE, 0,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xb1);
+ nvmf_ns_reservation_release(&g_ns, &g_ctrlr_B, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == 0);
+ SPDK_CU_ASSERT_FATAL(1 == g_ctrlr1_A.num_avail_log_pages);
+ SPDK_CU_ASSERT_FATAL(1 == g_ctrlr2_A.num_avail_log_pages);
+ SPDK_CU_ASSERT_FATAL(5 == g_ctrlr_B.num_avail_log_pages);
+ SPDK_CU_ASSERT_FATAL(1 == g_ctrlr_C.num_avail_log_pages);
+
+ ut_reservation_free_req(req);
+ ut_reservation_deinit();
+}
+
+static void
+test_reservation_release_notification_write_exclusive(void)
+{
+ struct spdk_nvmf_request *req;
+ struct spdk_nvme_cpl *rsp;
+
+ ut_reservation_init();
+
+ req = ut_reservation_build_req(16);
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+ rsp = &req->rsp->nvme_cpl;
+
+ ut_reservation_build_registrants();
+
+	/* ACQUIRE: Host B with g_ctrlr_B gets the reservation with
+ * type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE
+ */
+ rsp->status.sc = 0xff;
+ ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE, 0xb1, 0x0);
+ nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr_B, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE);
+
+	/* Test Case: g_ctrlr_B holds the reservation and releases it.
+	 * Because the reservation type is SPDK_NVME_RESERVE_WRITE_EXCLUSIVE,
+	 * no reservation notification occurs.
+ */
+ rsp->status.sc = 0xff;
+ g_ctrlr1_A.num_avail_log_pages = 5;
+ g_ctrlr2_A.num_avail_log_pages = 5;
+ g_ctrlr_B.num_avail_log_pages = 5;
+ g_ctrlr_C.num_avail_log_pages = 5;
+ ut_reservation_build_release_request(req, SPDK_NVME_RESERVE_RELEASE, 0,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE, 0xb1);
+ nvmf_ns_reservation_release(&g_ns, &g_ctrlr_B, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == 0);
+ SPDK_CU_ASSERT_FATAL(5 == g_ctrlr1_A.num_avail_log_pages);
+ SPDK_CU_ASSERT_FATAL(5 == g_ctrlr2_A.num_avail_log_pages);
+ SPDK_CU_ASSERT_FATAL(5 == g_ctrlr_B.num_avail_log_pages);
+ SPDK_CU_ASSERT_FATAL(5 == g_ctrlr_C.num_avail_log_pages);
+
+ ut_reservation_free_req(req);
+ ut_reservation_deinit();
+}
+
+static void
+test_reservation_clear_notification(void)
+{
+ struct spdk_nvmf_request *req;
+ struct spdk_nvme_cpl *rsp;
+
+ ut_reservation_init();
+
+ req = ut_reservation_build_req(16);
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+ rsp = &req->rsp->nvme_cpl;
+
+ ut_reservation_build_registrants();
+
+	/* ACQUIRE: Host B with g_ctrlr_B gets the reservation with
+ * type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY
+ */
+ rsp->status.sc = 0xff;
+ ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xb1, 0x0);
+ nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr_B, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
+
+	/* Test Case: g_ctrlr_B holds the reservation and clears it.
+	 * A reservation preempted notification is sent to g_ctrlr1_A/g_ctrlr2_A/g_ctrlr_C.
+ */
+ rsp->status.sc = 0xff;
+ g_ctrlr1_A.num_avail_log_pages = 0;
+ g_ctrlr2_A.num_avail_log_pages = 0;
+ g_ctrlr_B.num_avail_log_pages = 5;
+ g_ctrlr_C.num_avail_log_pages = 0;
+ ut_reservation_build_release_request(req, SPDK_NVME_RESERVE_CLEAR, 0,
+ 0, 0xb1);
+ nvmf_ns_reservation_release(&g_ns, &g_ctrlr_B, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == 0);
+ SPDK_CU_ASSERT_FATAL(1 == g_ctrlr1_A.num_avail_log_pages);
+ SPDK_CU_ASSERT_FATAL(1 == g_ctrlr2_A.num_avail_log_pages);
+ SPDK_CU_ASSERT_FATAL(5 == g_ctrlr_B.num_avail_log_pages);
+ SPDK_CU_ASSERT_FATAL(1 == g_ctrlr_C.num_avail_log_pages);
+
+ ut_reservation_free_req(req);
+ ut_reservation_deinit();
+}
+
+static void
+test_reservation_preempt_notification(void)
+{
+ struct spdk_nvmf_request *req;
+ struct spdk_nvme_cpl *rsp;
+
+ ut_reservation_init();
+
+ req = ut_reservation_build_req(16);
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+ rsp = &req->rsp->nvme_cpl;
+
+ ut_reservation_build_registrants();
+
+	/* ACQUIRE: Host B with g_ctrlr_B gets the reservation with
+ * type SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY
+ */
+ rsp->status.sc = 0xff;
+ ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_ACQUIRE, 0,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY, 0xb1, 0x0);
+ nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr_B, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_REG_ONLY);
+
+	/* Test Case: g_ctrlr_B holds the reservation; g_ctrlr_C preempts it,
+	 * the g_ctrlr_B registrant is unregistered, and the reservation is preempted.
+	 * A registration preempted notification is sent to g_ctrlr_B.
+	 * A reservation preempted notification is sent to g_ctrlr1_A/g_ctrlr2_A.
+ */
+ rsp->status.sc = 0xff;
+ g_ctrlr1_A.num_avail_log_pages = 0;
+ g_ctrlr2_A.num_avail_log_pages = 0;
+ g_ctrlr_B.num_avail_log_pages = 0;
+ g_ctrlr_C.num_avail_log_pages = 5;
+ ut_reservation_build_acquire_request(req, SPDK_NVME_RESERVE_PREEMPT, 0,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS, 0xc1, 0xb1);
+ nvmf_ns_reservation_acquire(&g_ns, &g_ctrlr_C, req);
+ SPDK_CU_ASSERT_FATAL(rsp->status.sc == SPDK_NVME_SC_SUCCESS);
+ SPDK_CU_ASSERT_FATAL(g_ns.rtype == SPDK_NVME_RESERVE_WRITE_EXCLUSIVE_ALL_REGS);
+ SPDK_CU_ASSERT_FATAL(1 == g_ctrlr1_A.num_avail_log_pages);
+ SPDK_CU_ASSERT_FATAL(1 == g_ctrlr2_A.num_avail_log_pages);
+ SPDK_CU_ASSERT_FATAL(1 == g_ctrlr_B.num_avail_log_pages);
+ SPDK_CU_ASSERT_FATAL(5 == g_ctrlr_C.num_avail_log_pages);
+
+ ut_reservation_free_req(req);
+ ut_reservation_deinit();
+}
+
+static void
+test_spdk_nvmf_ns_event(void)
+{
+ struct spdk_nvmf_tgt tgt = {};
+ struct spdk_nvmf_subsystem subsystem = {
+ .max_nsid = 0,
+ .ns = NULL,
+ .tgt = &tgt
+ };
+ struct spdk_nvmf_ctrlr ctrlr = {
+ .subsys = &subsystem
+ };
+ struct spdk_bdev bdev1 = {};
+ struct spdk_nvmf_ns_opts ns_opts;
+ uint32_t nsid;
+
+ tgt.max_subsystems = 1024;
+ tgt.subsystems = calloc(tgt.max_subsystems, sizeof(struct spdk_nvmf_subsystem *));
+ SPDK_CU_ASSERT_FATAL(tgt.subsystems != NULL);
+
+ /* Add one namespace */
+ spdk_nvmf_ns_opts_get_defaults(&ns_opts, sizeof(ns_opts));
+ nsid = spdk_nvmf_subsystem_add_ns(&subsystem, &bdev1, &ns_opts, sizeof(ns_opts), NULL);
+ CU_ASSERT(nsid == 1);
+ CU_ASSERT(NULL != subsystem.ns[0]);
+
+ /* Add one controller */
+ TAILQ_INIT(&subsystem.ctrlrs);
+ TAILQ_INSERT_TAIL(&subsystem.ctrlrs, &ctrlr, link);
+
+ /* Namespace resize event */
+ subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
+ g_ns_changed_nsid = 0xFFFFFFFF;
+ g_ns_changed_ctrlr = NULL;
+ nvmf_ns_event(SPDK_BDEV_EVENT_RESIZE, &bdev1, subsystem.ns[0]);
+ CU_ASSERT(SPDK_NVMF_SUBSYSTEM_PAUSING == subsystem.state);
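+	/* The resize event pauses the subsystem; polling resumes it and delivers
+	 * the namespace-changed notification.
+	 */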
+
+ poll_threads();
+ CU_ASSERT(1 == g_ns_changed_nsid);
+ CU_ASSERT(&ctrlr == g_ns_changed_ctrlr);
+ CU_ASSERT(SPDK_NVMF_SUBSYSTEM_ACTIVE == subsystem.state);
+
+ /* Namespace remove event */
+ subsystem.state = SPDK_NVMF_SUBSYSTEM_ACTIVE;
+ g_ns_changed_nsid = 0xFFFFFFFF;
+ g_ns_changed_ctrlr = NULL;
+ nvmf_ns_event(SPDK_BDEV_EVENT_REMOVE, &bdev1, subsystem.ns[0]);
+ CU_ASSERT(SPDK_NVMF_SUBSYSTEM_PAUSING == subsystem.state);
+ CU_ASSERT(0xFFFFFFFF == g_ns_changed_nsid);
+ CU_ASSERT(NULL == g_ns_changed_ctrlr);
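+	/* The remove event defers the namespace-changed notification until the
+	 * pause completes, so nothing is reported before the threads are polled.
+	 */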
+
+ poll_threads();
+ CU_ASSERT(1 == g_ns_changed_nsid);
+ CU_ASSERT(&ctrlr == g_ns_changed_ctrlr);
+ CU_ASSERT(NULL == subsystem.ns[0]);
+ CU_ASSERT(SPDK_NVMF_SUBSYSTEM_ACTIVE == subsystem.state);
+
+ free(subsystem.ns);
+ free(tgt.subsystems);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvmf", NULL, NULL);
+
+ CU_ADD_TEST(suite, nvmf_test_create_subsystem);
+ CU_ADD_TEST(suite, test_spdk_nvmf_subsystem_add_ns);
+ CU_ADD_TEST(suite, test_spdk_nvmf_subsystem_set_sn);
+ CU_ADD_TEST(suite, test_reservation_register);
+ CU_ADD_TEST(suite, test_reservation_register_with_ptpl);
+ CU_ADD_TEST(suite, test_reservation_acquire_preempt_1);
+ CU_ADD_TEST(suite, test_reservation_acquire_release_with_ptpl);
+ CU_ADD_TEST(suite, test_reservation_release);
+ CU_ADD_TEST(suite, test_reservation_unregister_notification);
+ CU_ADD_TEST(suite, test_reservation_release_notification);
+ CU_ADD_TEST(suite, test_reservation_release_notification_write_exclusive);
+ CU_ADD_TEST(suite, test_reservation_clear_notification);
+ CU_ADD_TEST(suite, test_reservation_preempt_notification);
+ CU_ADD_TEST(suite, test_spdk_nvmf_ns_event);
+
+ allocate_threads(1);
+ set_thread(0);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ free_threads();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvmf/tcp.c/.gitignore b/src/spdk/test/unit/lib/nvmf/tcp.c/.gitignore
new file mode 100644
index 000000000..ea821fbfa
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/tcp.c/.gitignore
@@ -0,0 +1 @@
+tcp_ut
diff --git a/src/spdk/test/unit/lib/nvmf/tcp.c/Makefile b/src/spdk/test/unit/lib/nvmf/tcp.c/Makefile
new file mode 100644
index 000000000..2f6dc9b85
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/tcp.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = tcp_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvmf/tcp.c/tcp_ut.c b/src/spdk/test/unit/lib/nvmf/tcp.c/tcp_ut.c
new file mode 100644
index 000000000..a6d6d9da3
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/tcp.c/tcp_ut.c
@@ -0,0 +1,722 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+#include "spdk/nvmf_spec.h"
+#include "spdk_cunit.h"
+
+#include "spdk_internal/mock.h"
+#include "spdk_internal/thread.h"
+
+#include "common/lib/test_env.c"
+#include "common/lib/test_sock.c"
+
+#include "nvmf/ctrlr.c"
+#include "nvmf/tcp.c"
+
+#define UT_IPV4_ADDR "192.168.0.1"
+#define UT_PORT "4420"
+#define UT_NVMF_ADRFAM_INVALID 0xf
+#define UT_MAX_QUEUE_DEPTH 128
+#define UT_MAX_QPAIRS_PER_CTRLR 128
+#define UT_IN_CAPSULE_DATA_SIZE 1024
+#define UT_MAX_IO_SIZE 4096
+#define UT_IO_UNIT_SIZE 1024
+#define UT_MAX_AQ_DEPTH 64
+#define UT_SQ_HEAD_MAX 128
+#define UT_NUM_SHARED_BUFFERS 128
+
+SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF)
+
+DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
+ int,
+ (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
+ 0);
+
+DEFINE_STUB(nvmf_subsystem_add_ctrlr,
+ int,
+ (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
+ 0);
+
+DEFINE_STUB(nvmf_subsystem_get_ctrlr,
+ struct spdk_nvmf_ctrlr *,
+ (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
+ NULL);
+
+DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
+ struct spdk_nvmf_subsystem *,
+ (struct spdk_nvmf_tgt *tgt, const char *subnqn),
+ NULL);
+
+DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
+ bool,
+ (struct spdk_nvmf_subsystem *subsystem, const struct spdk_nvme_transport_id *trid),
+ true);
+
+DEFINE_STUB_V(nvmf_get_discovery_log_page,
+ (struct spdk_nvmf_tgt *tgt, const char *hostnqn, struct iovec *iov,
+ uint32_t iovcnt, uint64_t offset, uint32_t length));
+
+DEFINE_STUB_V(nvmf_subsystem_remove_ctrlr,
+ (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr));
+
+DEFINE_STUB(spdk_nvmf_subsystem_get_first_ns,
+ struct spdk_nvmf_ns *,
+ (struct spdk_nvmf_subsystem *subsystem),
+ NULL);
+
+DEFINE_STUB(spdk_nvmf_subsystem_get_next_ns,
+ struct spdk_nvmf_ns *,
+ (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns),
+ NULL);
+
+DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
+ bool,
+ (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
+ true);
+
+DEFINE_STUB(nvmf_ctrlr_dsm_supported,
+ bool,
+ (struct spdk_nvmf_ctrlr *ctrlr),
+ false);
+
+DEFINE_STUB(nvmf_ctrlr_write_zeroes_supported,
+ bool,
+ (struct spdk_nvmf_ctrlr *ctrlr),
+ false);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_read_cmd,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *req),
+ 0);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_write_cmd,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *req),
+ 0);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_compare_cmd,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *req),
+ 0);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_compare_and_write_cmd,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *cmp_req, struct spdk_nvmf_request *write_req),
+ 0);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_write_zeroes_cmd,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *req),
+ 0);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_flush_cmd,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *req),
+ 0);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_dsm_cmd,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *req),
+ 0);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_nvme_passthru_io,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *req),
+ 0);
+
+DEFINE_STUB(spdk_nvmf_bdev_ctrlr_abort_cmd,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct spdk_nvmf_request *req, struct spdk_nvmf_request *req_to_abort),
+ 0);
+
+DEFINE_STUB(nvmf_bdev_ctrlr_get_dif_ctx,
+ bool,
+ (struct spdk_bdev *bdev, struct spdk_nvme_cmd *cmd, struct spdk_dif_ctx *dif_ctx),
+ false);
+
+DEFINE_STUB(nvmf_transport_req_complete,
+ int,
+ (struct spdk_nvmf_request *req),
+ 0);
+
+DEFINE_STUB_V(spdk_nvmf_request_free_buffers,
+ (struct spdk_nvmf_request *req, struct spdk_nvmf_transport_poll_group *group,
+ struct spdk_nvmf_transport *transport));
+
+DEFINE_STUB(spdk_sock_get_optimal_sock_group,
+ int,
+ (struct spdk_sock *sock, struct spdk_sock_group **group),
+ 0);
+
+DEFINE_STUB(spdk_sock_group_get_ctx,
+ void *,
+ (struct spdk_sock_group *group),
+ NULL);
+
+DEFINE_STUB(spdk_sock_set_priority,
+ int,
+ (struct spdk_sock *sock, int priority),
+ 0);
+
+DEFINE_STUB_V(nvmf_ns_reservation_request, (void *ctx));
+
+DEFINE_STUB_V(spdk_nvme_trid_populate_transport, (struct spdk_nvme_transport_id *trid,
+ enum spdk_nvme_transport_type trtype));
+DEFINE_STUB_V(spdk_nvmf_transport_register, (const struct spdk_nvmf_transport_ops *ops));
+
+DEFINE_STUB_V(spdk_nvmf_tgt_new_qpair, (struct spdk_nvmf_tgt *tgt, struct spdk_nvmf_qpair *qpair));
+
+DEFINE_STUB_V(nvmf_transport_qpair_abort_request,
+ (struct spdk_nvmf_qpair *qpair, struct spdk_nvmf_request *req));
+
+DEFINE_STUB_V(spdk_nvme_print_command, (uint16_t qid, struct spdk_nvme_cmd *cmd));
+DEFINE_STUB_V(spdk_nvme_print_completion, (uint16_t qid, struct spdk_nvme_cpl *cpl));
+
+struct spdk_trace_histories *g_trace_histories;
+
+struct spdk_bdev {
+ int ut_mock;
+ uint64_t blockcnt;
+};
+
+int
+spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
+ const struct spdk_nvme_transport_id *trid2)
+{
+ return 0;
+}
+
+void
+spdk_trace_register_object(uint8_t type, char id_prefix)
+{
+}
+
+void
+spdk_trace_register_description(const char *name,
+ uint16_t tpoint_id, uint8_t owner_type,
+ uint8_t object_type, uint8_t new_object,
+ uint8_t arg1_type, const char *arg1_name)
+{
+}
+
+void
+_spdk_trace_record(uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
+ uint32_t size, uint64_t object_id, uint64_t arg1)
+{
+}
+
+const char *
+spdk_nvme_transport_id_trtype_str(enum spdk_nvme_transport_type trtype)
+{
+ switch (trtype) {
+ case SPDK_NVME_TRANSPORT_PCIE:
+ return "PCIe";
+ case SPDK_NVME_TRANSPORT_RDMA:
+ return "RDMA";
+ case SPDK_NVME_TRANSPORT_FC:
+ return "FC";
+ default:
+ return NULL;
+ }
+}
+
+int
+spdk_nvme_transport_id_populate_trstring(struct spdk_nvme_transport_id *trid, const char *trstring)
+{
+ int len, i;
+
+ if (trstring == NULL) {
+ return -EINVAL;
+ }
+
+ len = strnlen(trstring, SPDK_NVMF_TRSTRING_MAX_LEN);
+ if (len == SPDK_NVMF_TRSTRING_MAX_LEN) {
+ return -EINVAL;
+ }
+
+	/* Store the official trstring as the uppercase version of the input. */
+ for (i = 0; i < len; i++) {
+ trid->trstring[i] = toupper(trstring[i]);
+ }
+ return 0;
+}
+
+int
+spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
+{
+ return 0;
+}
+
+int
+spdk_nvmf_request_get_buffers(struct spdk_nvmf_request *req,
+ struct spdk_nvmf_transport_poll_group *group,
+ struct spdk_nvmf_transport *transport,
+ uint32_t length)
+{
+	/* Fail any length of one io_unit_size or more. */
+ if (length >= transport->opts.io_unit_size) {
+ return -EINVAL;
+ }
+
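+	/* Hand back a single fake iovec; the sentinel pointer is never
+	 * dereferenced by these tests.
+	 */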
+ req->iovcnt = 1;
+ req->iov[0].iov_base = (void *)0xDEADBEEF;
+
+ return 0;
+}
+
+void
+nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata,
+ bool dif_insert_or_strip)
+{
+ uint64_t num_blocks;
+
+ SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
+ num_blocks = ns->bdev->blockcnt;
+ nsdata->nsze = num_blocks;
+ nsdata->ncap = num_blocks;
+ nsdata->nuse = num_blocks;
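+	/* Advertise a single LBA format with 512-byte blocks (lbads = log2(512) = 9). */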
+ nsdata->nlbaf = 0;
+ nsdata->flbas.format = 0;
+ nsdata->lbaf[0].lbads = spdk_u32log2(512);
+}
+
+const char *
+spdk_nvmf_subsystem_get_sn(const struct spdk_nvmf_subsystem *subsystem)
+{
+ return subsystem->sn;
+}
+
+const char *
+spdk_nvmf_subsystem_get_mn(const struct spdk_nvmf_subsystem *subsystem)
+{
+ return subsystem->mn;
+}
+
+void
+spdk_trace_add_register_fn(struct spdk_trace_register_fn *reg_fn)
+{
+}
+
+static void
+test_nvmf_tcp_create(void)
+{
+ struct spdk_thread *thread;
+ struct spdk_nvmf_transport *transport;
+ struct spdk_nvmf_tcp_transport *ttransport;
+ struct spdk_nvmf_transport_opts opts;
+
+ thread = spdk_thread_create(NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(thread != NULL);
+ spdk_set_thread(thread);
+
+ /* case 1 */
+ memset(&opts, 0, sizeof(opts));
+ opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
+ opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
+ opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
+ opts.max_io_size = UT_MAX_IO_SIZE;
+ opts.io_unit_size = UT_IO_UNIT_SIZE;
+ opts.max_aq_depth = UT_MAX_AQ_DEPTH;
+ opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
+ /* expect success */
+ transport = nvmf_tcp_create(&opts);
+ CU_ASSERT_PTR_NOT_NULL(transport);
+ ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
+ SPDK_CU_ASSERT_FATAL(ttransport != NULL);
+ transport->opts = opts;
+ CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
+ CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
+ CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
+ CU_ASSERT(transport->opts.io_unit_size == UT_IO_UNIT_SIZE);
+ /* destroy transport */
+ spdk_mempool_free(ttransport->transport.data_buf_pool);
+ free(ttransport);
+
+ /* case 2 */
+ memset(&opts, 0, sizeof(opts));
+ opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
+ opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
+ opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
+ opts.max_io_size = UT_MAX_IO_SIZE;
+ opts.io_unit_size = UT_MAX_IO_SIZE + 1;
+ opts.max_aq_depth = UT_MAX_AQ_DEPTH;
+ opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
+	/* expect success: the oversized io_unit_size is capped at max_io_size */
+ transport = nvmf_tcp_create(&opts);
+ CU_ASSERT_PTR_NOT_NULL(transport);
+ ttransport = SPDK_CONTAINEROF(transport, struct spdk_nvmf_tcp_transport, transport);
+ SPDK_CU_ASSERT_FATAL(ttransport != NULL);
+ transport->opts = opts;
+ CU_ASSERT(transport->opts.max_queue_depth == UT_MAX_QUEUE_DEPTH);
+ CU_ASSERT(transport->opts.max_io_size == UT_MAX_IO_SIZE);
+ CU_ASSERT(transport->opts.in_capsule_data_size == UT_IN_CAPSULE_DATA_SIZE);
+ CU_ASSERT(transport->opts.io_unit_size == UT_MAX_IO_SIZE);
+ /* destroy transport */
+ spdk_mempool_free(ttransport->transport.data_buf_pool);
+ free(ttransport);
+
+ /* case 3 */
+ memset(&opts, 0, sizeof(opts));
+ opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
+ opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
+ opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
+ opts.max_io_size = UT_MAX_IO_SIZE;
+ opts.io_unit_size = 16;
+ opts.max_aq_depth = UT_MAX_AQ_DEPTH;
+	/* expect failure */
+ transport = nvmf_tcp_create(&opts);
+ CU_ASSERT_PTR_NULL(transport);
+
+ spdk_thread_exit(thread);
+ while (!spdk_thread_is_exited(thread)) {
+ spdk_thread_poll(thread, 0, 0);
+ }
+ spdk_thread_destroy(thread);
+}
+
+static void
+test_nvmf_tcp_destroy(void)
+{
+ struct spdk_thread *thread;
+ struct spdk_nvmf_transport *transport;
+ struct spdk_nvmf_transport_opts opts;
+
+ thread = spdk_thread_create(NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(thread != NULL);
+ spdk_set_thread(thread);
+
+ /* case 1 */
+ memset(&opts, 0, sizeof(opts));
+ opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
+ opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
+ opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
+ opts.max_io_size = UT_MAX_IO_SIZE;
+ opts.io_unit_size = UT_IO_UNIT_SIZE;
+ opts.max_aq_depth = UT_MAX_AQ_DEPTH;
+ opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
+ transport = nvmf_tcp_create(&opts);
+ CU_ASSERT_PTR_NOT_NULL(transport);
+ transport->opts = opts;
+ /* destroy transport */
+ CU_ASSERT(nvmf_tcp_destroy(transport) == 0);
+
+ spdk_thread_exit(thread);
+ while (!spdk_thread_is_exited(thread)) {
+ spdk_thread_poll(thread, 0, 0);
+ }
+ spdk_thread_destroy(thread);
+}
+
+static void
+test_nvmf_tcp_poll_group_create(void)
+{
+ struct spdk_nvmf_transport *transport;
+ struct spdk_nvmf_transport_poll_group *group;
+ struct spdk_thread *thread;
+ struct spdk_nvmf_transport_opts opts;
+ struct spdk_sock_group grp = {};
+
+ thread = spdk_thread_create(NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(thread != NULL);
+ spdk_set_thread(thread);
+
+ memset(&opts, 0, sizeof(opts));
+ opts.max_queue_depth = UT_MAX_QUEUE_DEPTH;
+ opts.max_qpairs_per_ctrlr = UT_MAX_QPAIRS_PER_CTRLR;
+ opts.in_capsule_data_size = UT_IN_CAPSULE_DATA_SIZE;
+ opts.max_io_size = UT_MAX_IO_SIZE;
+ opts.io_unit_size = UT_IO_UNIT_SIZE;
+ opts.max_aq_depth = UT_MAX_AQ_DEPTH;
+ opts.num_shared_buffers = UT_NUM_SHARED_BUFFERS;
+ transport = nvmf_tcp_create(&opts);
+ CU_ASSERT_PTR_NOT_NULL(transport);
+ transport->opts = opts;
+ MOCK_SET(spdk_sock_group_create, &grp);
+ group = nvmf_tcp_poll_group_create(transport);
+ MOCK_CLEAR_P(spdk_sock_group_create);
+ SPDK_CU_ASSERT_FATAL(group);
+ group->transport = transport;
+ nvmf_tcp_poll_group_destroy(group);
+ nvmf_tcp_destroy(transport);
+
+ spdk_thread_exit(thread);
+ while (!spdk_thread_is_exited(thread)) {
+ spdk_thread_poll(thread, 0, 0);
+ }
+ spdk_thread_destroy(thread);
+}
+
+static void
+test_nvmf_tcp_send_c2h_data(void)
+{
+ struct spdk_thread *thread;
+ struct spdk_nvmf_tcp_transport ttransport = {};
+ struct spdk_nvmf_tcp_qpair tqpair = {};
+ struct spdk_nvmf_tcp_req tcp_req = {};
+ struct nvme_tcp_pdu pdu = {};
+ struct spdk_nvme_tcp_c2h_data_hdr *c2h_data;
+
+ thread = spdk_thread_create(NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(thread != NULL);
+ spdk_set_thread(thread);
+
+ tcp_req.pdu = &pdu;
+ tcp_req.req.length = 300;
+
+ tqpair.qpair.transport = &ttransport.transport;
+ TAILQ_INIT(&tqpair.send_queue);
+
+ /* Set qpair state to make unrelated operations NOP */
+ tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
+ tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;
+
+ tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
+
+ tcp_req.req.iov[0].iov_base = (void *)0xDEADBEEF;
+ tcp_req.req.iov[0].iov_len = 101;
+ tcp_req.req.iov[1].iov_base = (void *)0xFEEDBEEF;
+ tcp_req.req.iov[1].iov_len = 100;
+ tcp_req.req.iov[2].iov_base = (void *)0xC0FFEE;
+ tcp_req.req.iov[2].iov_len = 99;
+ tcp_req.req.iovcnt = 3;
+ tcp_req.req.length = 300;
+
+ nvmf_tcp_send_c2h_data(&tqpair, &tcp_req);
+
+ CU_ASSERT(TAILQ_FIRST(&tqpair.send_queue) == &pdu);
+ TAILQ_REMOVE(&tqpair.send_queue, &pdu, tailq);
+
+ c2h_data = &pdu.hdr.c2h_data;
+ CU_ASSERT(c2h_data->datao == 0);
+	CU_ASSERT(c2h_data->datal == 300);
+ CU_ASSERT(c2h_data->common.plen == sizeof(*c2h_data) + 300);
+ CU_ASSERT(c2h_data->common.flags & SPDK_NVME_TCP_C2H_DATA_FLAGS_LAST_PDU);
+
+ CU_ASSERT(pdu.data_iovcnt == 3);
+ CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
+ CU_ASSERT(pdu.data_iov[0].iov_len == 101);
+ CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
+ CU_ASSERT(pdu.data_iov[1].iov_len == 100);
+ CU_ASSERT((uint64_t)pdu.data_iov[2].iov_base == 0xC0FFEE);
+ CU_ASSERT(pdu.data_iov[2].iov_len == 99);
+
+ spdk_thread_exit(thread);
+ while (!spdk_thread_is_exited(thread)) {
+ spdk_thread_poll(thread, 0, 0);
+ }
+ spdk_thread_destroy(thread);
+}
+
+#define NVMF_TCP_PDU_MAX_H2C_DATA_SIZE (128 * 1024)
+
+static void
+test_nvmf_tcp_h2c_data_hdr_handle(void)
+{
+ struct spdk_nvmf_tcp_transport ttransport = {};
+ struct spdk_nvmf_tcp_qpair tqpair = {};
+ struct nvme_tcp_pdu pdu = {};
+ struct spdk_nvmf_tcp_req tcp_req = {};
+ struct spdk_nvme_tcp_h2c_data_hdr *h2c_data;
+
+ TAILQ_INIT(&tqpair.state_queue[TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER]);
+
+ /* Set qpair state to make unrelated operations NOP */
+ tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
+ tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_ERROR;
+
+ tcp_req.req.iov[0].iov_base = (void *)0xDEADBEEF;
+ tcp_req.req.iov[0].iov_len = 101;
+ tcp_req.req.iov[1].iov_base = (void *)0xFEEDBEEF;
+ tcp_req.req.iov[1].iov_len = 99;
+ tcp_req.req.iovcnt = 2;
+ tcp_req.req.length = 200;
+
+ tcp_req.req.cmd = (union nvmf_h2c_msg *)&tcp_req.cmd;
+ tcp_req.req.cmd->nvme_cmd.cid = 1;
+ tcp_req.ttag = 2;
+
+ TAILQ_INSERT_TAIL(&tqpair.state_queue[TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER],
+ &tcp_req, state_link);
+
+ h2c_data = &pdu.hdr.h2c_data;
+ h2c_data->cccid = 1;
+ h2c_data->ttag = 2;
+ h2c_data->datao = 0;
+ h2c_data->datal = 200;
+
+ nvmf_tcp_h2c_data_hdr_handle(&ttransport, &tqpair, &pdu);
+
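+	/* The handler is expected to map the 200-byte H2C data region onto the
+	 * request's existing iovecs.
+	 */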
+ CU_ASSERT(pdu.data_iovcnt == 2);
+ CU_ASSERT((uint64_t)pdu.data_iov[0].iov_base == 0xDEADBEEF);
+ CU_ASSERT(pdu.data_iov[0].iov_len == 101);
+ CU_ASSERT((uint64_t)pdu.data_iov[1].iov_base == 0xFEEDBEEF);
+ CU_ASSERT(pdu.data_iov[1].iov_len == 99);
+
+ CU_ASSERT(TAILQ_FIRST(&tqpair.state_queue[TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER]) ==
+ &tcp_req);
+ TAILQ_REMOVE(&tqpair.state_queue[TCP_REQUEST_STATE_TRANSFERRING_HOST_TO_CONTROLLER],
+ &tcp_req, state_link);
+}
+
+static void
+test_nvmf_tcp_incapsule_data_handle(void)
+{
+ struct spdk_nvmf_tcp_transport ttransport = {};
+ struct spdk_nvmf_tcp_qpair tqpair = {};
+ struct nvme_tcp_pdu *pdu;
+ union nvmf_c2h_msg rsp0 = {};
+ union nvmf_c2h_msg rsp = {};
+
+ struct spdk_nvmf_request *req_temp = NULL;
+ struct spdk_nvmf_tcp_req tcp_req2 = {};
+ struct spdk_nvmf_tcp_req tcp_req1 = {};
+
+ struct spdk_nvme_tcp_cmd *capsule_data;
+ struct spdk_nvmf_capsule_cmd *nvmf_capsule_data;
+ struct spdk_nvme_sgl_descriptor *sgl;
+
+ struct spdk_nvmf_transport_poll_group *group;
+ struct spdk_nvmf_tcp_poll_group tcp_group = {};
+ struct spdk_sock_group grp = {};
+ int i = 0;
+
+ ttransport.transport.opts.max_io_size = UT_MAX_IO_SIZE;
+ ttransport.transport.opts.io_unit_size = UT_IO_UNIT_SIZE;
+
+ tcp_group.sock_group = &grp;
+ TAILQ_INIT(&tcp_group.qpairs);
+ group = &tcp_group.group;
+ group->transport = &ttransport.transport;
+ STAILQ_INIT(&group->pending_buf_queue);
+ tqpair.group = &tcp_group;
+
+	/* Initialize the tqpair's request state queues; the in-progress PDU will wait for a data buffer. */
+ for (i = TCP_REQUEST_STATE_FREE; i < TCP_REQUEST_NUM_STATES; i++) {
+ TAILQ_INIT(&tqpair.state_queue[i]);
+ }
+
+ TAILQ_INIT(&tqpair.send_queue);
+
+ TAILQ_INSERT_TAIL(&tqpair.state_queue[TCP_REQUEST_STATE_FREE], &tcp_req2, state_link);
+ tqpair.state_cntr[TCP_REQUEST_STATE_FREE]++;
+ tqpair.qpair.transport = &ttransport.transport;
+ tqpair.state = NVME_TCP_QPAIR_STATE_RUNNING;
+ tqpair.recv_state = NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PSH;
+ tqpair.qpair.state = SPDK_NVMF_QPAIR_ACTIVE;
+
+	/* Initialize an idle tcp_req (tcp_req2) for the tqpair's TCP_REQUEST_STATE_FREE queue */
+ tcp_req2.req.qpair = &tqpair.qpair;
+ tcp_req2.req.cmd = (union nvmf_h2c_msg *)&tcp_req2.cmd;
+ tcp_req2.req.rsp = &rsp;
+
+ /* init tcp_req1 */
+ tcp_req1.req.qpair = &tqpair.qpair;
+ tcp_req1.req.cmd = (union nvmf_h2c_msg *)&tcp_req1.cmd;
+ tcp_req1.req.rsp = &rsp0;
+ tcp_req1.state = TCP_REQUEST_STATE_NEW;
+
+ TAILQ_INSERT_TAIL(&tqpair.state_queue[TCP_REQUEST_STATE_NEW], &tcp_req1, state_link);
+ tqpair.state_cntr[TCP_REQUEST_STATE_NEW]++;
+
+	/* Initialize the PDU so that it requires an SGL data buffer */
+ pdu = &tqpair.pdu_in_progress;
+ capsule_data = &pdu->hdr.capsule_cmd;
+ nvmf_capsule_data = (struct spdk_nvmf_capsule_cmd *)&pdu->hdr.capsule_cmd.ccsqe;
+ sgl = &capsule_data->ccsqe.dptr.sgl1;
+
+ capsule_data->common.pdu_type = SPDK_NVME_TCP_PDU_TYPE_CAPSULE_CMD;
+ capsule_data->common.hlen = sizeof(*capsule_data);
+ capsule_data->common.plen = 1096;
+ capsule_data->ccsqe.opc = SPDK_NVME_OPC_FABRIC;
+
+ sgl->unkeyed.subtype = SPDK_NVME_SGL_SUBTYPE_TRANSPORT;
+ sgl->generic.type = SPDK_NVME_SGL_TYPE_TRANSPORT_DATA_BLOCK;
+ sgl->unkeyed.length = UT_IO_UNIT_SIZE;
+
+ nvmf_capsule_data->fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
+
+	/* Insert tcp_req1 into pending_buf_queue; this request takes precedence over the next one. */
+ nvmf_tcp_req_process(&ttransport, &tcp_req1);
+ CU_ASSERT(STAILQ_FIRST(&group->pending_buf_queue) == &tcp_req1.req);
+
+ sgl->unkeyed.length = UT_IO_UNIT_SIZE - 1;
+
+	/* Process the tqpair's capsule command, but tcp_req1 still remains in pending_buf_queue. */
+ nvmf_tcp_capsule_cmd_hdr_handle(&ttransport, &tqpair, &tqpair.pdu_in_progress);
+ CU_ASSERT(tqpair.recv_state == NVME_TCP_PDU_RECV_STATE_AWAIT_PDU_PAYLOAD);
+ CU_ASSERT(STAILQ_FIRST(&group->pending_buf_queue) == &tcp_req1.req);
+ STAILQ_FOREACH(req_temp, &group->pending_buf_queue, buf_link) {
+ if (req_temp == &tcp_req2.req) {
+ break;
+ }
+ }
+ CU_ASSERT(req_temp == NULL);
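+	/* tcp_req2 is expected to have been taken from the FREE queue to back the
+	 * in-capsule command.
+	 */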
+ CU_ASSERT(tqpair.pdu_in_progress.req == (void *)&tcp_req2);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ CU_set_error_action(CUEA_ABORT);
+ CU_initialize_registry();
+
+ suite = CU_add_suite("nvmf", NULL, NULL);
+
+ CU_ADD_TEST(suite, test_nvmf_tcp_create);
+ CU_ADD_TEST(suite, test_nvmf_tcp_destroy);
+ CU_ADD_TEST(suite, test_nvmf_tcp_poll_group_create);
+ CU_ADD_TEST(suite, test_nvmf_tcp_send_c2h_data);
+ CU_ADD_TEST(suite, test_nvmf_tcp_h2c_data_hdr_handle);
+ CU_ADD_TEST(suite, test_nvmf_tcp_incapsule_data_handle);
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}