summaryrefslogtreecommitdiffstats
path: root/src/spdk/test/unit/lib
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-27 18:24:20 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-27 18:24:20 +0000
commit483eb2f56657e8e7f419ab1a4fab8dce9ade8609 (patch)
treee5d88d25d870d5dedacb6bbdbe2a966086a0a5cf /src/spdk/test/unit/lib
parentInitial commit. (diff)
downloadceph-upstream.tar.xz
ceph-upstream.zip
Adding upstream version 14.2.21.upstream/14.2.21upstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/spdk/test/unit/lib')
-rw-r--r--src/spdk/test/unit/lib/Makefile47
-rw-r--r--src/spdk/test/unit/lib/bdev/Makefile50
-rw-r--r--src/spdk/test/unit/lib/bdev/bdev.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/bdev/bdev.c/Makefile39
-rw-r--r--src/spdk/test/unit/lib/bdev/bdev.c/bdev_ut.c1214
-rw-r--r--src/spdk/test/unit/lib/bdev/bdev_raid.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/bdev/bdev_raid.c/Makefile40
-rw-r--r--src/spdk/test/unit/lib/bdev/bdev_raid.c/bdev_raid_ut.c2236
-rw-r--r--src/spdk/test/unit/lib/bdev/crypto.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/bdev/crypto.c/Makefile41
-rw-r--r--src/spdk/test/unit/lib/bdev/crypto.c/crypto_ut.c908
-rw-r--r--src/spdk/test/unit/lib/bdev/crypto.c/rte_crypto.h95
-rw-r--r--src/spdk/test/unit/lib/bdev/crypto.c/rte_cryptodev.h153
-rw-r--r--src/spdk/test/unit/lib/bdev/crypto.c/rte_mbuf.h148
-rw-r--r--src/spdk/test/unit/lib/bdev/crypto.c/rte_mempool.h145
-rw-r--r--src/spdk/test/unit/lib/bdev/gpt/Makefile44
-rw-r--r--src/spdk/test/unit/lib/bdev/gpt/gpt.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/bdev/gpt/gpt.c/Makefile40
-rw-r--r--src/spdk/test/unit/lib/bdev/gpt/gpt.c/gpt_ut.c297
-rw-r--r--src/spdk/test/unit/lib/bdev/mt/Makefile44
-rw-r--r--src/spdk/test/unit/lib/bdev/mt/bdev.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/bdev/mt/bdev.c/Makefile41
-rw-r--r--src/spdk/test/unit/lib/bdev/mt/bdev.c/bdev_ut.c1360
-rw-r--r--src/spdk/test/unit/lib/bdev/part.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/bdev/part.c/Makefile40
-rw-r--r--src/spdk/test/unit/lib/bdev/part.c/part_ut.c179
-rw-r--r--src/spdk/test/unit/lib/bdev/pmem/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/bdev/pmem/Makefile40
-rw-r--r--src/spdk/test/unit/lib/bdev/pmem/bdev_pmem_ut.c783
-rw-r--r--src/spdk/test/unit/lib/bdev/scsi_nvme.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/bdev/scsi_nvme.c/Makefile39
-rw-r--r--src/spdk/test/unit/lib/bdev/scsi_nvme.c/scsi_nvme_ut.c142
-rw-r--r--src/spdk/test/unit/lib/bdev/vbdev_lvol.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/bdev/vbdev_lvol.c/Makefile40
-rw-r--r--src/spdk/test/unit/lib/bdev/vbdev_lvol.c/vbdev_lvol_ut.c1410
-rw-r--r--src/spdk/test/unit/lib/blob/Makefile44
-rw-r--r--src/spdk/test/unit/lib/blob/blob.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/blob/blob.c/Makefile41
-rw-r--r--src/spdk/test/unit/lib/blob/blob.c/blob_ut.c5914
-rw-r--r--src/spdk/test/unit/lib/blob/bs_dev_common.c225
-rw-r--r--src/spdk/test/unit/lib/blob/bs_scheduler.c87
-rw-r--r--src/spdk/test/unit/lib/blobfs/Makefile44
-rw-r--r--src/spdk/test/unit/lib/blobfs/blobfs_async_ut/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/blobfs/blobfs_async_ut/Makefile41
-rw-r--r--src/spdk/test/unit/lib/blobfs/blobfs_async_ut/blobfs_async_ut.c522
-rw-r--r--src/spdk/test/unit/lib/blobfs/blobfs_sync_ut/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/blobfs/blobfs_sync_ut/Makefile41
-rw-r--r--src/spdk/test/unit/lib/blobfs/blobfs_sync_ut/blobfs_sync_ut.c410
-rw-r--r--src/spdk/test/unit/lib/blobfs/tree.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/blobfs/tree.c/Makefile40
-rw-r--r--src/spdk/test/unit/lib/blobfs/tree.c/tree_ut.c159
-rw-r--r--src/spdk/test/unit/lib/event/Makefile44
-rw-r--r--src/spdk/test/unit/lib/event/app.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/event/app.c/Makefile42
-rw-r--r--src/spdk/test/unit/lib/event/app.c/app_ut.c195
-rw-r--r--src/spdk/test/unit/lib/event/subsystem.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/event/subsystem.c/Makefile40
-rw-r--r--src/spdk/test/unit/lib/event/subsystem.c/subsystem_ut.c304
-rw-r--r--src/spdk/test/unit/lib/ioat/Makefile44
-rw-r--r--src/spdk/test/unit/lib/ioat/ioat.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/ioat/ioat.c/Makefile39
-rw-r--r--src/spdk/test/unit/lib/ioat/ioat.c/ioat_ut.c153
-rw-r--r--src/spdk/test/unit/lib/iscsi/Makefile44
-rw-r--r--src/spdk/test/unit/lib/iscsi/common.c256
-rw-r--r--src/spdk/test/unit/lib/iscsi/conn.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/iscsi/conn.c/Makefile42
-rw-r--r--src/spdk/test/unit/lib/iscsi/conn.c/conn_ut.c404
-rw-r--r--src/spdk/test/unit/lib/iscsi/init_grp.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/iscsi/init_grp.c/Makefile41
-rw-r--r--src/spdk/test/unit/lib/iscsi/init_grp.c/init_grp.conf31
-rw-r--r--src/spdk/test/unit/lib/iscsi/init_grp.c/init_grp_ut.c702
-rw-r--r--src/spdk/test/unit/lib/iscsi/iscsi.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/iscsi/iscsi.c/Makefile48
-rw-r--r--src/spdk/test/unit/lib/iscsi/iscsi.c/iscsi_ut.c972
-rw-r--r--src/spdk/test/unit/lib/iscsi/param.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/iscsi/param.c/Makefile40
-rw-r--r--src/spdk/test/unit/lib/iscsi/param.c/param_ut.c397
-rw-r--r--src/spdk/test/unit/lib/iscsi/portal_grp.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/iscsi/portal_grp.c/Makefile42
-rw-r--r--src/spdk/test/unit/lib/iscsi/portal_grp.c/portal_grp_ut.c477
-rw-r--r--src/spdk/test/unit/lib/iscsi/tgt_node.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/iscsi/tgt_node.c/Makefile41
-rw-r--r--src/spdk/test/unit/lib/iscsi/tgt_node.c/tgt_node.conf95
-rw-r--r--src/spdk/test/unit/lib/iscsi/tgt_node.c/tgt_node_ut.c886
-rw-r--r--src/spdk/test/unit/lib/json/Makefile44
-rw-r--r--src/spdk/test/unit/lib/json/json_parse.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/json/json_parse.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/json/json_parse.c/json_parse_ut.c940
-rw-r--r--src/spdk/test/unit/lib/json/json_util.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/json/json_util.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/json/json_util.c/json_util_ut.c963
-rw-r--r--src/spdk/test/unit/lib/json/json_write.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/json/json_write.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/json/json_write.c/json_write_ut.c745
-rw-r--r--src/spdk/test/unit/lib/json_mock.c81
-rw-r--r--src/spdk/test/unit/lib/jsonrpc/Makefile44
-rw-r--r--src/spdk/test/unit/lib/jsonrpc/jsonrpc_server.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/jsonrpc/jsonrpc_server.c/Makefile39
-rw-r--r--src/spdk/test/unit/lib/jsonrpc/jsonrpc_server.c/jsonrpc_server_ut.c423
-rw-r--r--src/spdk/test/unit/lib/log/Makefile44
-rw-r--r--src/spdk/test/unit/lib/log/log.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/log/log.c/Makefile40
-rw-r--r--src/spdk/test/unit/lib/log/log.c/log_ut.c113
-rw-r--r--src/spdk/test/unit/lib/lvol/Makefile44
-rw-r--r--src/spdk/test/unit/lib/lvol/lvol.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/lvol/lvol.c/Makefile40
-rw-r--r--src/spdk/test/unit/lib/lvol/lvol.c/lvol_ut.c2127
-rw-r--r--src/spdk/test/unit/lib/nvme/Makefile47
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme.c/nvme_ut.c1135
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/nvme_ctrlr_ut.c1795
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/nvme_ctrlr_cmd_ut.c645
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/nvme_ctrlr_ocssd_cmd_ut.c116
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ns.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ns.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ns.c/nvme_ns_ut.c163
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/nvme_ns_cmd_ut.c1440
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/nvme_ns_ocssd_cmd_ut.c677
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_pcie.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_pcie.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_pcie.c/nvme_pcie_ut.c861
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_qpair.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_qpair.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_qpair.c/nvme_qpair_ut.c418
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_quirks.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_quirks.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_quirks.c/nvme_quirks_ut.c102
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_rdma.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_rdma.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvme/nvme_rdma.c/nvme_rdma_ut.c298
-rw-r--r--src/spdk/test/unit/lib/nvmf/Makefile44
-rw-r--r--src/spdk/test/unit/lib/nvmf/ctrlr.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvmf/ctrlr.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvmf/ctrlr.c/ctrlr_ut.c797
-rw-r--r--src/spdk/test/unit/lib/nvmf/ctrlr_bdev.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvmf/ctrlr_bdev.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvmf/ctrlr_bdev.c/ctrlr_bdev_ut.c260
-rw-r--r--src/spdk/test/unit/lib/nvmf/ctrlr_discovery.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvmf/ctrlr_discovery.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvmf/ctrlr_discovery.c/ctrlr_discovery_ut.c306
-rw-r--r--src/spdk/test/unit/lib/nvmf/request.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvmf/request.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvmf/request.c/request_ut.c153
-rw-r--r--src/spdk/test/unit/lib/nvmf/subsystem.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/nvmf/subsystem.c/Makefile38
-rw-r--r--src/spdk/test/unit/lib/nvmf/subsystem.c/subsystem_ut.c477
-rw-r--r--src/spdk/test/unit/lib/scsi/Makefile44
-rw-r--r--src/spdk/test/unit/lib/scsi/dev.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/scsi/dev.c/Makefile40
-rw-r--r--src/spdk/test/unit/lib/scsi/dev.c/dev_ut.c681
-rw-r--r--src/spdk/test/unit/lib/scsi/lun.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/scsi/lun.c/Makefile40
-rw-r--r--src/spdk/test/unit/lib/scsi/lun.c/lun_ut.c654
-rw-r--r--src/spdk/test/unit/lib/scsi/scsi.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/scsi/scsi.c/Makefile41
-rw-r--r--src/spdk/test/unit/lib/scsi/scsi.c/scsi_ut.c80
-rw-r--r--src/spdk/test/unit/lib/scsi/scsi_bdev.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/scsi/scsi_bdev.c/Makefile40
-rw-r--r--src/spdk/test/unit/lib/scsi/scsi_bdev.c/scsi_bdev_ut.c988
-rw-r--r--src/spdk/test/unit/lib/sock/Makefile44
-rw-r--r--src/spdk/test/unit/lib/sock/sock.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/sock/sock.c/Makefile40
-rw-r--r--src/spdk/test/unit/lib/sock/sock.c/sock_ut.c643
-rw-r--r--src/spdk/test/unit/lib/thread/Makefile44
-rw-r--r--src/spdk/test/unit/lib/thread/thread.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/thread/thread.c/Makefile41
-rw-r--r--src/spdk/test/unit/lib/thread/thread.c/thread_ut.c501
-rw-r--r--src/spdk/test/unit/lib/util/Makefile44
-rw-r--r--src/spdk/test/unit/lib/util/base64.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/util/base64.c/Makefile40
-rw-r--r--src/spdk/test/unit/lib/util/base64.c/base64_ut.c268
-rw-r--r--src/spdk/test/unit/lib/util/bit_array.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/util/bit_array.c/Makefile40
-rw-r--r--src/spdk/test/unit/lib/util/bit_array.c/bit_array_ut.c327
-rw-r--r--src/spdk/test/unit/lib/util/cpuset.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/util/cpuset.c/Makefile40
-rw-r--r--src/spdk/test/unit/lib/util/cpuset.c/cpuset_ut.c265
-rw-r--r--src/spdk/test/unit/lib/util/crc16.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/util/crc16.c/Makefile40
-rw-r--r--src/spdk/test/unit/lib/util/crc16.c/crc16_ut.c80
-rw-r--r--src/spdk/test/unit/lib/util/crc32_ieee.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/util/crc32_ieee.c/Makefile40
-rw-r--r--src/spdk/test/unit/lib/util/crc32_ieee.c/crc32_ieee_ut.c83
-rw-r--r--src/spdk/test/unit/lib/util/crc32c.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/util/crc32c.c/Makefile40
-rw-r--r--src/spdk/test/unit/lib/util/crc32c.c/crc32c_ut.c154
-rw-r--r--src/spdk/test/unit/lib/util/string.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/util/string.c/Makefile40
-rw-r--r--src/spdk/test/unit/lib/util/string.c/string_ut.c237
-rw-r--r--src/spdk/test/unit/lib/vhost/Makefile44
-rw-r--r--src/spdk/test/unit/lib/vhost/test_vhost.c121
-rw-r--r--src/spdk/test/unit/lib/vhost/vhost.c/.gitignore1
-rw-r--r--src/spdk/test/unit/lib/vhost/vhost.c/Makefile42
-rw-r--r--src/spdk/test/unit/lib/vhost/vhost.c/vhost_ut.c364
205 files changed, 44993 insertions, 0 deletions
diff --git a/src/spdk/test/unit/lib/Makefile b/src/spdk/test/unit/lib/Makefile
new file mode 100644
index 00000000..205835ee
--- /dev/null
+++ b/src/spdk/test/unit/lib/Makefile
@@ -0,0 +1,47 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = bdev blob blobfs event ioat iscsi json jsonrpc log lvol nvme nvmf scsi sock thread util
+ifeq ($(OS),Linux)
+DIRS-$(CONFIG_VHOST) += vhost
+endif
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/bdev/Makefile b/src/spdk/test/unit/lib/bdev/Makefile
new file mode 100644
index 00000000..61efba78
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/Makefile
@@ -0,0 +1,50 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = bdev.c part.c scsi_nvme.c gpt vbdev_lvol.c mt bdev_raid.c
+
+ifeq ($(CONFIG_CRYPTO),y)
+DIRS-y += crypto.c
+endif
+
+DIRS-$(CONFIG_PMDK) += pmem
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/bdev/bdev.c/.gitignore b/src/spdk/test/unit/lib/bdev/bdev.c/.gitignore
new file mode 100644
index 00000000..a5a22d0d
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/bdev.c/.gitignore
@@ -0,0 +1 @@
+bdev_ut
diff --git a/src/spdk/test/unit/lib/bdev/bdev.c/Makefile b/src/spdk/test/unit/lib/bdev/bdev.c/Makefile
new file mode 100644
index 00000000..384fa27a
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/bdev.c/Makefile
@@ -0,0 +1,39 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) 2016 FUJITSU LIMITED, All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of the copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+
+TEST_FILE = bdev_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/bdev/bdev.c/bdev_ut.c b/src/spdk/test/unit/lib/bdev/bdev.c/bdev_ut.c
new file mode 100644
index 00000000..3c14f712
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/bdev.c/bdev_ut.c
@@ -0,0 +1,1214 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "common/lib/test_env.c"
+#include "unit/lib/json_mock.c"
+
+#include "spdk/config.h"
+/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
+#undef SPDK_CONFIG_VTUNE
+
+#include "bdev/bdev.c"
+
+DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *, (struct spdk_conf *cp,
+ const char *name), NULL);
+DEFINE_STUB(spdk_conf_section_get_nmval, char *,
+ (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL);
+DEFINE_STUB(spdk_conf_section_get_intval, int, (struct spdk_conf_section *sp, const char *key), -1);
+
+struct spdk_trace_histories *g_trace_histories;
+DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
+DEFINE_STUB_V(spdk_trace_register_owner, (uint8_t type, char id_prefix));
+DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
+DEFINE_STUB_V(spdk_trace_register_description, (const char *name, const char *short_name,
+ uint16_t tpoint_id, uint8_t owner_type,
+ uint8_t object_type, uint8_t new_object,
+ uint8_t arg1_is_ptr, const char *arg1_name));
+DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
+ uint32_t size, uint64_t object_id, uint64_t arg1));
+
+static void
+_bdev_send_msg(spdk_thread_fn fn, void *ctx, void *thread_ctx)
+{
+ fn(ctx);
+}
+
+void
+spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io,
+ int *sc, int *sk, int *asc, int *ascq)
+{
+}
+
+static int
+null_init(void)
+{
+ return 0;
+}
+
+static int
+null_clean(void)
+{
+ return 0;
+}
+
+static int
+stub_destruct(void *ctx)
+{
+ return 0;
+}
+
+struct ut_expected_io {
+ uint8_t type;
+ uint64_t offset;
+ uint64_t length;
+ int iovcnt;
+ struct iovec iov[BDEV_IO_NUM_CHILD_IOV];
+ TAILQ_ENTRY(ut_expected_io) link;
+};
+
+struct bdev_ut_channel {
+ TAILQ_HEAD(, spdk_bdev_io) outstanding_io;
+ uint32_t outstanding_io_count;
+ TAILQ_HEAD(, ut_expected_io) expected_io;
+};
+
+static bool g_io_done;
+static enum spdk_bdev_io_status g_io_status;
+static uint32_t g_bdev_ut_io_device;
+static struct bdev_ut_channel *g_bdev_ut_channel;
+
+static struct ut_expected_io *
+ut_alloc_expected_io(uint8_t type, uint64_t offset, uint64_t length, int iovcnt)
+{
+ struct ut_expected_io *expected_io;
+
+ expected_io = calloc(1, sizeof(*expected_io));
+ SPDK_CU_ASSERT_FATAL(expected_io != NULL);
+
+ expected_io->type = type;
+ expected_io->offset = offset;
+ expected_io->length = length;
+ expected_io->iovcnt = iovcnt;
+
+ return expected_io;
+}
+
+static void
+ut_expected_io_set_iov(struct ut_expected_io *expected_io, int pos, void *base, size_t len)
+{
+ expected_io->iov[pos].iov_base = base;
+ expected_io->iov[pos].iov_len = len;
+}
+
+static void
+stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
+{
+ struct bdev_ut_channel *ch = spdk_io_channel_get_ctx(_ch);
+ struct ut_expected_io *expected_io;
+ struct iovec *iov, *expected_iov;
+ int i;
+
+ TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
+ ch->outstanding_io_count++;
+
+ expected_io = TAILQ_FIRST(&ch->expected_io);
+ if (expected_io == NULL) {
+ return;
+ }
+ TAILQ_REMOVE(&ch->expected_io, expected_io, link);
+
+ if (expected_io->type != SPDK_BDEV_IO_TYPE_INVALID) {
+ CU_ASSERT(bdev_io->type == expected_io->type);
+ }
+
+ if (expected_io->length == 0) {
+ free(expected_io);
+ return;
+ }
+
+ CU_ASSERT(expected_io->offset == bdev_io->u.bdev.offset_blocks);
+ CU_ASSERT(expected_io->length = bdev_io->u.bdev.num_blocks);
+
+ if (expected_io->iovcnt == 0) {
+ free(expected_io);
+ /* UNMAP, WRITE_ZEROES and FLUSH don't have iovs, so we can just return now. */
+ return;
+ }
+
+ CU_ASSERT(expected_io->iovcnt == bdev_io->u.bdev.iovcnt);
+ for (i = 0; i < expected_io->iovcnt; i++) {
+ iov = &bdev_io->u.bdev.iovs[i];
+ expected_iov = &expected_io->iov[i];
+ CU_ASSERT(iov->iov_len == expected_iov->iov_len);
+ CU_ASSERT(iov->iov_base == expected_iov->iov_base);
+ }
+
+ free(expected_io);
+}
+
+static uint32_t
+stub_complete_io(uint32_t num_to_complete)
+{
+ struct bdev_ut_channel *ch = g_bdev_ut_channel;
+ struct spdk_bdev_io *bdev_io;
+ uint32_t num_completed = 0;
+
+ while (num_completed < num_to_complete) {
+ if (TAILQ_EMPTY(&ch->outstanding_io)) {
+ break;
+ }
+ bdev_io = TAILQ_FIRST(&ch->outstanding_io);
+ TAILQ_REMOVE(&ch->outstanding_io, bdev_io, module_link);
+ ch->outstanding_io_count--;
+ spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_SUCCESS);
+ num_completed++;
+ }
+
+ return num_completed;
+}
+
+static struct spdk_io_channel *
+bdev_ut_get_io_channel(void *ctx)
+{
+ return spdk_get_io_channel(&g_bdev_ut_io_device);
+}
+
+static bool
+stub_io_type_supported(void *_bdev, enum spdk_bdev_io_type io_type)
+{
+ return true;
+}
+
+static struct spdk_bdev_fn_table fn_table = {
+ .destruct = stub_destruct,
+ .submit_request = stub_submit_request,
+ .get_io_channel = bdev_ut_get_io_channel,
+ .io_type_supported = stub_io_type_supported,
+};
+
+static int
+bdev_ut_create_ch(void *io_device, void *ctx_buf)
+{
+ struct bdev_ut_channel *ch = ctx_buf;
+
+ CU_ASSERT(g_bdev_ut_channel == NULL);
+ g_bdev_ut_channel = ch;
+
+ TAILQ_INIT(&ch->outstanding_io);
+ ch->outstanding_io_count = 0;
+ TAILQ_INIT(&ch->expected_io);
+ return 0;
+}
+
+static void
+bdev_ut_destroy_ch(void *io_device, void *ctx_buf)
+{
+ CU_ASSERT(g_bdev_ut_channel != NULL);
+ g_bdev_ut_channel = NULL;
+}
+
+static int
+bdev_ut_module_init(void)
+{
+ spdk_io_device_register(&g_bdev_ut_io_device, bdev_ut_create_ch, bdev_ut_destroy_ch,
+ sizeof(struct bdev_ut_channel), NULL);
+ return 0;
+}
+
+static void
+bdev_ut_module_fini(void)
+{
+ spdk_io_device_unregister(&g_bdev_ut_io_device, NULL);
+}
+
+struct spdk_bdev_module bdev_ut_if = {
+ .name = "bdev_ut",
+ .module_init = bdev_ut_module_init,
+ .module_fini = bdev_ut_module_fini,
+};
+
+static void vbdev_ut_examine(struct spdk_bdev *bdev);
+
+static int
+vbdev_ut_module_init(void)
+{
+ return 0;
+}
+
+static void
+vbdev_ut_module_fini(void)
+{
+}
+
+struct spdk_bdev_module vbdev_ut_if = {
+ .name = "vbdev_ut",
+ .module_init = vbdev_ut_module_init,
+ .module_fini = vbdev_ut_module_fini,
+ .examine_config = vbdev_ut_examine,
+};
+
+SPDK_BDEV_MODULE_REGISTER(&bdev_ut_if)
+SPDK_BDEV_MODULE_REGISTER(&vbdev_ut_if)
+
+static void
+vbdev_ut_examine(struct spdk_bdev *bdev)
+{
+ spdk_bdev_module_examine_done(&vbdev_ut_if);
+}
+
+static struct spdk_bdev *
+allocate_bdev(char *name)
+{
+ struct spdk_bdev *bdev;
+ int rc;
+
+ bdev = calloc(1, sizeof(*bdev));
+ SPDK_CU_ASSERT_FATAL(bdev != NULL);
+
+ bdev->name = name;
+ bdev->fn_table = &fn_table;
+ bdev->module = &bdev_ut_if;
+ bdev->blockcnt = 1024;
+ bdev->blocklen = 512;
+
+ rc = spdk_bdev_register(bdev);
+ CU_ASSERT(rc == 0);
+
+ return bdev;
+}
+
+static struct spdk_bdev *
+allocate_vbdev(char *name, struct spdk_bdev *base1, struct spdk_bdev *base2)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev *array[2];
+ int rc;
+
+ bdev = calloc(1, sizeof(*bdev));
+ SPDK_CU_ASSERT_FATAL(bdev != NULL);
+
+ bdev->name = name;
+ bdev->fn_table = &fn_table;
+ bdev->module = &vbdev_ut_if;
+
+ /* vbdev must have at least one base bdev */
+ CU_ASSERT(base1 != NULL);
+
+ array[0] = base1;
+ array[1] = base2;
+
+ rc = spdk_vbdev_register(bdev, array, base2 == NULL ? 1 : 2);
+ CU_ASSERT(rc == 0);
+
+ return bdev;
+}
+
+static void
+free_bdev(struct spdk_bdev *bdev)
+{
+ spdk_bdev_unregister(bdev, NULL, NULL);
+ memset(bdev, 0xFF, sizeof(*bdev));
+ free(bdev);
+}
+
+static void
+free_vbdev(struct spdk_bdev *bdev)
+{
+ spdk_bdev_unregister(bdev, NULL, NULL);
+ memset(bdev, 0xFF, sizeof(*bdev));
+ free(bdev);
+}
+
+static void
+get_device_stat_cb(struct spdk_bdev *bdev, struct spdk_bdev_io_stat *stat, void *cb_arg, int rc)
+{
+ const char *bdev_name;
+
+ CU_ASSERT(bdev != NULL);
+ CU_ASSERT(rc == 0);
+ bdev_name = spdk_bdev_get_name(bdev);
+ CU_ASSERT_STRING_EQUAL(bdev_name, "bdev0");
+
+ free(stat);
+ free_bdev(bdev);
+}
+
+static void
+get_device_stat_test(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_io_stat *stat;
+
+ bdev = allocate_bdev("bdev0");
+ stat = calloc(1, sizeof(struct spdk_bdev_io_stat));
+ if (stat == NULL) {
+ free_bdev(bdev);
+ return;
+ }
+ spdk_bdev_get_device_stat(bdev, stat, get_device_stat_cb, NULL);
+}
+
+static void
+open_write_test(void)
+{
+ struct spdk_bdev *bdev[9];
+ struct spdk_bdev_desc *desc[9] = {};
+ int rc;
+
+ /*
+ * Create a tree of bdevs to test various open w/ write cases.
+ *
+ * bdev0 through bdev3 are physical block devices, such as NVMe
+ * namespaces or Ceph block devices.
+ *
+ * bdev4 is a virtual bdev with multiple base bdevs. This models
+ * caching or RAID use cases.
+ *
+ * bdev5 through bdev7 are all virtual bdevs with the same base
+ * bdev (except bdev7). This models partitioning or logical volume
+ * use cases.
+ *
+ * bdev7 is a virtual bdev with multiple base bdevs. One of base bdevs
+ * (bdev2) is shared with other virtual bdevs: bdev5 and bdev6. This
+ * models caching, RAID, partitioning or logical volumes use cases.
+ *
+ * bdev8 is a virtual bdev with multiple base bdevs, but these
+ * base bdevs are themselves virtual bdevs.
+ *
+ * bdev8
+ * |
+ * +----------+
+ * | |
+ * bdev4 bdev5 bdev6 bdev7
+ * | | | |
+ * +---+---+ +---+ + +---+---+
+ * | | \ | / \
+ * bdev0 bdev1 bdev2 bdev3
+ */
+
+ bdev[0] = allocate_bdev("bdev0");
+ rc = spdk_bdev_module_claim_bdev(bdev[0], NULL, &bdev_ut_if);
+ CU_ASSERT(rc == 0);
+
+ bdev[1] = allocate_bdev("bdev1");
+ rc = spdk_bdev_module_claim_bdev(bdev[1], NULL, &bdev_ut_if);
+ CU_ASSERT(rc == 0);
+
+ bdev[2] = allocate_bdev("bdev2");
+ rc = spdk_bdev_module_claim_bdev(bdev[2], NULL, &bdev_ut_if);
+ CU_ASSERT(rc == 0);
+
+ bdev[3] = allocate_bdev("bdev3");
+ rc = spdk_bdev_module_claim_bdev(bdev[3], NULL, &bdev_ut_if);
+ CU_ASSERT(rc == 0);
+
+ bdev[4] = allocate_vbdev("bdev4", bdev[0], bdev[1]);
+ rc = spdk_bdev_module_claim_bdev(bdev[4], NULL, &bdev_ut_if);
+ CU_ASSERT(rc == 0);
+
+ bdev[5] = allocate_vbdev("bdev5", bdev[2], NULL);
+ rc = spdk_bdev_module_claim_bdev(bdev[5], NULL, &bdev_ut_if);
+ CU_ASSERT(rc == 0);
+
+ bdev[6] = allocate_vbdev("bdev6", bdev[2], NULL);
+
+ bdev[7] = allocate_vbdev("bdev7", bdev[2], bdev[3]);
+
+ bdev[8] = allocate_vbdev("bdev8", bdev[4], bdev[5]);
+
+ /* Open bdev0 read-only. This should succeed. */
+ rc = spdk_bdev_open(bdev[0], false, NULL, NULL, &desc[0]);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(desc[0] != NULL);
+ spdk_bdev_close(desc[0]);
+
+ /*
+ * Open bdev1 read/write. This should fail since bdev1 has been claimed
+ * by a vbdev module.
+ */
+ rc = spdk_bdev_open(bdev[1], true, NULL, NULL, &desc[1]);
+ CU_ASSERT(rc == -EPERM);
+
+ /*
+ * Open bdev4 read/write. This should fail since bdev3 has been claimed
+ * by a vbdev module.
+ */
+ rc = spdk_bdev_open(bdev[4], true, NULL, NULL, &desc[4]);
+ CU_ASSERT(rc == -EPERM);
+
+ /* Open bdev4 read-only. This should succeed. */
+ rc = spdk_bdev_open(bdev[4], false, NULL, NULL, &desc[4]);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(desc[4] != NULL);
+ spdk_bdev_close(desc[4]);
+
+ /*
+ * Open bdev8 read/write. This should succeed since it is a leaf
+ * bdev.
+ */
+ rc = spdk_bdev_open(bdev[8], true, NULL, NULL, &desc[8]);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(desc[8] != NULL);
+ spdk_bdev_close(desc[8]);
+
+ /*
+ * Open bdev5 read/write. This should fail since bdev4 has been claimed
+ * by a vbdev module.
+ */
+ rc = spdk_bdev_open(bdev[5], true, NULL, NULL, &desc[5]);
+ CU_ASSERT(rc == -EPERM);
+
+ /* Open bdev4 read-only. This should succeed. */
+ rc = spdk_bdev_open(bdev[5], false, NULL, NULL, &desc[5]);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(desc[5] != NULL);
+ spdk_bdev_close(desc[5]);
+
+ free_vbdev(bdev[8]);
+
+ free_vbdev(bdev[5]);
+ free_vbdev(bdev[6]);
+ free_vbdev(bdev[7]);
+
+ free_vbdev(bdev[4]);
+
+ free_bdev(bdev[0]);
+ free_bdev(bdev[1]);
+ free_bdev(bdev[2]);
+ free_bdev(bdev[3]);
+}
+
+static void
+bytes_to_blocks_test(void)
+{
+ struct spdk_bdev bdev;
+ uint64_t offset_blocks, num_blocks;
+
+ memset(&bdev, 0, sizeof(bdev));
+
+ bdev.blocklen = 512;
+
+ /* All parameters valid */
+ offset_blocks = 0;
+ num_blocks = 0;
+ CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 1024, &num_blocks) == 0);
+ CU_ASSERT(offset_blocks == 1);
+ CU_ASSERT(num_blocks == 2);
+
+ /* Offset not a block multiple */
+ CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 3, &offset_blocks, 512, &num_blocks) != 0);
+
+ /* Length not a block multiple */
+ CU_ASSERT(spdk_bdev_bytes_to_blocks(&bdev, 512, &offset_blocks, 3, &num_blocks) != 0);
+}
+
+static void
+num_blocks_test(void)
+{
+ struct spdk_bdev bdev;
+ struct spdk_bdev_desc *desc = NULL;
+ int rc;
+
+ memset(&bdev, 0, sizeof(bdev));
+ bdev.name = "num_blocks";
+ bdev.fn_table = &fn_table;
+ bdev.module = &bdev_ut_if;
+ spdk_bdev_register(&bdev);
+ spdk_bdev_notify_blockcnt_change(&bdev, 50);
+
+ /* Growing block number */
+ CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 70) == 0);
+ /* Shrinking block number */
+ CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 30) == 0);
+
+ /* In case bdev opened */
+ rc = spdk_bdev_open(&bdev, false, NULL, NULL, &desc);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(desc != NULL);
+
+ /* Growing block number */
+ CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 80) == 0);
+ /* Shrinking block number */
+ CU_ASSERT(spdk_bdev_notify_blockcnt_change(&bdev, 20) != 0);
+
+ spdk_bdev_close(desc);
+ spdk_bdev_unregister(&bdev, NULL, NULL);
+}
+
+static void
+io_valid_test(void)
+{
+ struct spdk_bdev bdev;
+
+ memset(&bdev, 0, sizeof(bdev));
+
+ bdev.blocklen = 512;
+ spdk_bdev_notify_blockcnt_change(&bdev, 100);
+
+ /* All parameters valid */
+ CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 1, 2) == true);
+
+ /* Last valid block */
+ CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 99, 1) == true);
+
+ /* Offset past end of bdev */
+ CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 100, 1) == false);
+
+ /* Offset + length past end of bdev */
+ CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 99, 2) == false);
+
+ /* Offset near end of uint64_t range (2^64 - 1) */
+ CU_ASSERT(spdk_bdev_io_valid_blocks(&bdev, 18446744073709551615ULL, 1) == false);
+}
+
+static void
+alias_add_del_test(void)
+{
+ struct spdk_bdev *bdev[3];
+ int rc;
+
+ /* Creating and registering bdevs */
+ bdev[0] = allocate_bdev("bdev0");
+ SPDK_CU_ASSERT_FATAL(bdev[0] != 0);
+
+ bdev[1] = allocate_bdev("bdev1");
+ SPDK_CU_ASSERT_FATAL(bdev[1] != 0);
+
+ bdev[2] = allocate_bdev("bdev2");
+ SPDK_CU_ASSERT_FATAL(bdev[2] != 0);
+
+ /*
+ * Trying adding an alias identical to name.
+ * Alias is identical to name, so it can not be added to aliases list
+ */
+ rc = spdk_bdev_alias_add(bdev[0], bdev[0]->name);
+ CU_ASSERT(rc == -EEXIST);
+
+ /*
+ * Trying to add empty alias,
+ * this one should fail
+ */
+ rc = spdk_bdev_alias_add(bdev[0], NULL);
+ CU_ASSERT(rc == -EINVAL);
+
+ /* Trying adding same alias to two different registered bdevs */
+
+ /* Alias is used first time, so this one should pass */
+ rc = spdk_bdev_alias_add(bdev[0], "proper alias 0");
+ CU_ASSERT(rc == 0);
+
+ /* Alias was added to another bdev, so this one should fail */
+ rc = spdk_bdev_alias_add(bdev[1], "proper alias 0");
+ CU_ASSERT(rc == -EEXIST);
+
+ /* Alias is used first time, so this one should pass */
+ rc = spdk_bdev_alias_add(bdev[1], "proper alias 1");
+ CU_ASSERT(rc == 0);
+
+ /* Trying removing an alias from registered bdevs */
+
+ /* Alias is not on a bdev aliases list, so this one should fail */
+ rc = spdk_bdev_alias_del(bdev[0], "not existing");
+ CU_ASSERT(rc == -ENOENT);
+
+ /* Alias is present on a bdev aliases list, so this one should pass */
+ rc = spdk_bdev_alias_del(bdev[0], "proper alias 0");
+ CU_ASSERT(rc == 0);
+
+ /* Alias is present on a bdev aliases list, so this one should pass */
+ rc = spdk_bdev_alias_del(bdev[1], "proper alias 1");
+ CU_ASSERT(rc == 0);
+
+ /* Trying to remove name instead of alias, so this one should fail, name cannot be changed or removed */
+ rc = spdk_bdev_alias_del(bdev[0], bdev[0]->name);
+ CU_ASSERT(rc != 0);
+
+ /* Trying to del all alias from empty alias list */
+ spdk_bdev_alias_del_all(bdev[2]);
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev[2]->aliases));
+
+ /* Trying to del all alias from non-empty alias list */
+ rc = spdk_bdev_alias_add(bdev[2], "alias0");
+ CU_ASSERT(rc == 0);
+ rc = spdk_bdev_alias_add(bdev[2], "alias1");
+ CU_ASSERT(rc == 0);
+ spdk_bdev_alias_del_all(bdev[2]);
+ CU_ASSERT(TAILQ_EMPTY(&bdev[2]->aliases));
+
+ /* Unregister and free bdevs */
+ spdk_bdev_unregister(bdev[0], NULL, NULL);
+ spdk_bdev_unregister(bdev[1], NULL, NULL);
+ spdk_bdev_unregister(bdev[2], NULL, NULL);
+
+ free(bdev[0]);
+ free(bdev[1]);
+ free(bdev[2]);
+}
+
+static void
+io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
+{
+ g_io_done = true;
+ g_io_status = bdev_io->internal.status;
+ spdk_bdev_free_io(bdev_io);
+}
+
+static void
+bdev_init_cb(void *arg, int rc)
+{
+ CU_ASSERT(rc == 0);
+}
+
+static void
+bdev_fini_cb(void *arg)
+{
+}
+
+struct bdev_ut_io_wait_entry {
+ struct spdk_bdev_io_wait_entry entry;
+ struct spdk_io_channel *io_ch;
+ struct spdk_bdev_desc *desc;
+ bool submitted;
+};
+
+static void
+io_wait_cb(void *arg)
+{
+ struct bdev_ut_io_wait_entry *entry = arg;
+ int rc;
+
+ rc = spdk_bdev_read_blocks(entry->desc, entry->io_ch, NULL, 0, 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ entry->submitted = true;
+}
+
+static void
+bdev_io_wait_test(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_desc *desc = NULL;
+ struct spdk_io_channel *io_ch;
+ struct spdk_bdev_opts bdev_opts = {
+ .bdev_io_pool_size = 4,
+ .bdev_io_cache_size = 2,
+ };
+ struct bdev_ut_io_wait_entry io_wait_entry;
+ struct bdev_ut_io_wait_entry io_wait_entry2;
+ int rc;
+
+ rc = spdk_bdev_set_opts(&bdev_opts);
+ CU_ASSERT(rc == 0);
+ spdk_bdev_initialize(bdev_init_cb, NULL);
+
+ bdev = allocate_bdev("bdev0");
+
+ rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(desc != NULL);
+ io_ch = spdk_bdev_get_io_channel(desc);
+ CU_ASSERT(io_ch != NULL);
+
+ rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
+
+ rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
+ CU_ASSERT(rc == -ENOMEM);
+
+ io_wait_entry.entry.bdev = bdev;
+ io_wait_entry.entry.cb_fn = io_wait_cb;
+ io_wait_entry.entry.cb_arg = &io_wait_entry;
+ io_wait_entry.io_ch = io_ch;
+ io_wait_entry.desc = desc;
+ io_wait_entry.submitted = false;
+ /* Cannot use the same io_wait_entry for two different calls. */
+ memcpy(&io_wait_entry2, &io_wait_entry, sizeof(io_wait_entry));
+ io_wait_entry2.entry.cb_arg = &io_wait_entry2;
+
+ /* Queue two I/O waits. */
+ rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry.entry);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(io_wait_entry.submitted == false);
+ rc = spdk_bdev_queue_io_wait(bdev, io_ch, &io_wait_entry2.entry);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(io_wait_entry2.submitted == false);
+
+ stub_complete_io(1);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
+ CU_ASSERT(io_wait_entry.submitted == true);
+ CU_ASSERT(io_wait_entry2.submitted == false);
+
+ stub_complete_io(1);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 4);
+ CU_ASSERT(io_wait_entry2.submitted == true);
+
+ stub_complete_io(4);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
+
+ spdk_put_io_channel(io_ch);
+ spdk_bdev_close(desc);
+ free_bdev(bdev);
+ spdk_bdev_finish(bdev_fini_cb, NULL);
+}
+
+static void
+bdev_io_spans_boundary_test(void)
+{
+ struct spdk_bdev bdev;
+ struct spdk_bdev_io bdev_io;
+
+ memset(&bdev, 0, sizeof(bdev));
+
+ bdev.optimal_io_boundary = 0;
+ bdev_io.bdev = &bdev;
+
+ /* bdev has no optimal_io_boundary set - so this should return false. */
+ CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == false);
+
+ bdev.optimal_io_boundary = 32;
+ bdev_io.type = SPDK_BDEV_IO_TYPE_RESET;
+
+ /* RESETs are not based on LBAs - so this should return false. */
+ CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == false);
+
+ bdev_io.type = SPDK_BDEV_IO_TYPE_READ;
+ bdev_io.u.bdev.offset_blocks = 0;
+ bdev_io.u.bdev.num_blocks = 32;
+
+ /* This I/O run right up to, but does not cross, the boundary - so this should return false. */
+ CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == false);
+
+ bdev_io.u.bdev.num_blocks = 33;
+
+ /* This I/O spans a boundary. */
+ CU_ASSERT(_spdk_bdev_io_should_split(&bdev_io) == true);
+}
+
+static void
+bdev_io_split(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_desc *desc = NULL;
+ struct spdk_io_channel *io_ch;
+ struct spdk_bdev_opts bdev_opts = {
+ .bdev_io_pool_size = 512,
+ .bdev_io_cache_size = 64,
+ };
+ struct iovec iov[BDEV_IO_NUM_CHILD_IOV * 2];
+ struct ut_expected_io *expected_io;
+ uint64_t i;
+ int rc;
+
+ rc = spdk_bdev_set_opts(&bdev_opts);
+ CU_ASSERT(rc == 0);
+ spdk_bdev_initialize(bdev_init_cb, NULL);
+
+ bdev = allocate_bdev("bdev0");
+
+ rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(desc != NULL);
+ io_ch = spdk_bdev_get_io_channel(desc);
+ CU_ASSERT(io_ch != NULL);
+
+ bdev->optimal_io_boundary = 16;
+ bdev->split_on_optimal_io_boundary = false;
+
+ g_io_done = false;
+
+ /* First test that the I/O does not get split if split_on_optimal_io_boundary == false. */
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 8, 1);
+ ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 8 * 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
+
+ bdev->split_on_optimal_io_boundary = true;
+
+ /* Now test that a single-vector command is split correctly.
+ * Offset 14, length 8, payload 0xF000
+ * Child - Offset 14, length 2, payload 0xF000
+ * Child - Offset 16, length 6, payload 0xF000 + 2 * 512
+ *
+ * Set up the expected values before calling spdk_bdev_read_blocks
+ */
+ g_io_done = false;
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
+ ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
+ ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ /* spdk_bdev_read_blocks will submit the first child immediately. */
+ rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 2);
+ stub_complete_io(2);
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
+
+ /* Now set up a more complex, multi-vector command that needs to be split,
+ * including splitting iovecs.
+ */
+ iov[0].iov_base = (void *)0x10000;
+ iov[0].iov_len = 512;
+ iov[1].iov_base = (void *)0x20000;
+ iov[1].iov_len = 20 * 512;
+ iov[2].iov_base = (void *)0x30000;
+ iov[2].iov_len = 11 * 512;
+
+ g_io_done = false;
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
+ ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
+ ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
+ ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
+ ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
+ ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 3);
+ stub_complete_io(3);
+ CU_ASSERT(g_io_done == true);
+
+ /* Test multi vector command that needs to be split by strip and then needs to be
+ * split further due to the capacity of child iovs.
+ */
+ for (i = 0; i < BDEV_IO_NUM_CHILD_IOV * 2; i++) {
+ iov[i].iov_base = (void *)((i + 1) * 0x10000);
+ iov[i].iov_len = 512;
+ }
+
+ bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
+ g_io_done = false;
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 0, BDEV_IO_NUM_CHILD_IOV,
+ BDEV_IO_NUM_CHILD_IOV);
+ for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
+ ut_expected_io_set_iov(expected_io, i, (void *)((i + 1) * 0x10000), 512);
+ }
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, BDEV_IO_NUM_CHILD_IOV,
+ BDEV_IO_NUM_CHILD_IOV, BDEV_IO_NUM_CHILD_IOV);
+ for (i = 0; i < BDEV_IO_NUM_CHILD_IOV; i++) {
+ ut_expected_io_set_iov(expected_io, i,
+ (void *)((i + 1 + BDEV_IO_NUM_CHILD_IOV) * 0x10000), 512);
+ }
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
+ BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == false);
+
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
+
+ /* Test multi vector command that needs to be split by strip and then needs to be
+ * split further due to the capacity of child iovs, but fails to split. The cause
+ * of failure of split is that the length of an iovec is not multiple of block size.
+ */
+ for (i = 0; i < BDEV_IO_NUM_CHILD_IOV - 1; i++) {
+ iov[i].iov_base = (void *)((i + 1) * 0x10000);
+ iov[i].iov_len = 512;
+ }
+ iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_base = (void *)(BDEV_IO_NUM_CHILD_IOV * 0x10000);
+ iov[BDEV_IO_NUM_CHILD_IOV - 1].iov_len = 256;
+
+ bdev->optimal_io_boundary = BDEV_IO_NUM_CHILD_IOV;
+ g_io_done = false;
+ g_io_status = 0;
+
+ rc = spdk_bdev_readv_blocks(desc, io_ch, iov, BDEV_IO_NUM_CHILD_IOV * 2, 0,
+ BDEV_IO_NUM_CHILD_IOV * 2, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == true);
+ CU_ASSERT(g_io_status == SPDK_BDEV_IO_STATUS_FAILED);
+
+ /* Test a WRITE_ZEROES that would span an I/O boundary. WRITE_ZEROES should not be
+ * split, so test that.
+ */
+ bdev->optimal_io_boundary = 15;
+ g_io_done = false;
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE_ZEROES, 9, 36, 0);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ rc = spdk_bdev_write_zeroes_blocks(desc, io_ch, 9, 36, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == true);
+
+ /* Test an UNMAP. This should also not be split. */
+ bdev->optimal_io_boundary = 16;
+ g_io_done = false;
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_UNMAP, 15, 2, 0);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ rc = spdk_bdev_unmap_blocks(desc, io_ch, 15, 2, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == true);
+
+ /* Test a FLUSH. This should also not be split. */
+ bdev->optimal_io_boundary = 16;
+ g_io_done = false;
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_FLUSH, 15, 2, 0);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ rc = spdk_bdev_flush_blocks(desc, io_ch, 15, 2, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == true);
+
+ CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));
+
+ spdk_put_io_channel(io_ch);
+ spdk_bdev_close(desc);
+ free_bdev(bdev);
+ spdk_bdev_finish(bdev_fini_cb, NULL);
+}
+
+static void
+bdev_io_split_with_io_wait(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev_desc *desc;
+ struct spdk_io_channel *io_ch;
+ struct spdk_bdev_channel *channel;
+ struct spdk_bdev_mgmt_channel *mgmt_ch;
+ struct spdk_bdev_opts bdev_opts = {
+ .bdev_io_pool_size = 2,
+ .bdev_io_cache_size = 1,
+ };
+ struct iovec iov[3];
+ struct ut_expected_io *expected_io;
+ int rc;
+
+ rc = spdk_bdev_set_opts(&bdev_opts);
+ CU_ASSERT(rc == 0);
+ spdk_bdev_initialize(bdev_init_cb, NULL);
+
+ bdev = allocate_bdev("bdev0");
+
+ rc = spdk_bdev_open(bdev, true, NULL, NULL, &desc);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(desc != NULL);
+ io_ch = spdk_bdev_get_io_channel(desc);
+ CU_ASSERT(io_ch != NULL);
+ channel = spdk_io_channel_get_ctx(io_ch);
+ mgmt_ch = channel->shared_resource->mgmt_ch;
+
+ bdev->optimal_io_boundary = 16;
+ bdev->split_on_optimal_io_boundary = true;
+
+ rc = spdk_bdev_read_blocks(desc, io_ch, NULL, 0, 1, io_done, NULL);
+ CU_ASSERT(rc == 0);
+
+ /* Now test that a single-vector command is split correctly.
+ * Offset 14, length 8, payload 0xF000
+ * Child - Offset 14, length 2, payload 0xF000
+ * Child - Offset 16, length 6, payload 0xF000 + 2 * 512
+ *
+ * Set up the expected values before calling spdk_bdev_read_blocks
+ */
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 14, 2, 1);
+ ut_expected_io_set_iov(expected_io, 0, (void *)0xF000, 2 * 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_READ, 16, 6, 1);
+ ut_expected_io_set_iov(expected_io, 0, (void *)(0xF000 + 2 * 512), 6 * 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ /* The following children will be submitted sequentially due to the capacity of
+ * spdk_bdev_io.
+ */
+
+ /* The first child I/O will be queued to wait until an spdk_bdev_io becomes available */
+ rc = spdk_bdev_read_blocks(desc, io_ch, (void *)0xF000, 14, 8, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(!TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+
+ /* Completing the first read I/O will submit the first child */
+ stub_complete_io(1);
+ CU_ASSERT(TAILQ_EMPTY(&mgmt_ch->io_wait_queue));
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+
+ /* Completing the first child will submit the second child */
+ stub_complete_io(1);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+
+ /* Complete the second child I/O. This should result in our callback getting
+ * invoked since the parent I/O is now complete.
+ */
+ stub_complete_io(1);
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 0);
+
+ /* Now set up a more complex, multi-vector command that needs to be split,
+ * including splitting iovecs.
+ */
+ iov[0].iov_base = (void *)0x10000;
+ iov[0].iov_len = 512;
+ iov[1].iov_base = (void *)0x20000;
+ iov[1].iov_len = 20 * 512;
+ iov[2].iov_base = (void *)0x30000;
+ iov[2].iov_len = 11 * 512;
+
+ g_io_done = false;
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 14, 2, 2);
+ ut_expected_io_set_iov(expected_io, 0, (void *)0x10000, 512);
+ ut_expected_io_set_iov(expected_io, 1, (void *)0x20000, 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 16, 16, 1);
+ ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 512), 16 * 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ expected_io = ut_alloc_expected_io(SPDK_BDEV_IO_TYPE_WRITE, 32, 14, 2);
+ ut_expected_io_set_iov(expected_io, 0, (void *)(0x20000 + 17 * 512), 3 * 512);
+ ut_expected_io_set_iov(expected_io, 1, (void *)0x30000, 11 * 512);
+ TAILQ_INSERT_TAIL(&g_bdev_ut_channel->expected_io, expected_io, link);
+
+ rc = spdk_bdev_writev_blocks(desc, io_ch, iov, 3, 14, 32, io_done, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_io_done == false);
+
+ /* The following children will be submitted sequentially due to the capacity of
+ * spdk_bdev_io.
+ */
+
+ /* Completing the first child will submit the second child */
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == false);
+
+ /* Completing the second child will submit the third child */
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == false);
+
+ /* Completing the third child will result in our callback getting invoked
+ * since the parent I/O is now complete.
+ */
+ CU_ASSERT(g_bdev_ut_channel->outstanding_io_count == 1);
+ stub_complete_io(1);
+ CU_ASSERT(g_io_done == true);
+
+ CU_ASSERT(TAILQ_EMPTY(&g_bdev_ut_channel->expected_io));
+
+ spdk_put_io_channel(io_ch);
+ spdk_bdev_close(desc);
+ free_bdev(bdev);
+ spdk_bdev_finish(bdev_fini_cb, NULL);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("bdev", null_init, null_clean);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "bytes_to_blocks_test", bytes_to_blocks_test) == NULL ||
+ CU_add_test(suite, "num_blocks_test", num_blocks_test) == NULL ||
+ CU_add_test(suite, "io_valid", io_valid_test) == NULL ||
+ CU_add_test(suite, "open_write", open_write_test) == NULL ||
+ CU_add_test(suite, "alias_add_del", alias_add_del_test) == NULL ||
+ CU_add_test(suite, "get_device_stat", get_device_stat_test) == NULL ||
+ CU_add_test(suite, "bdev_io_wait", bdev_io_wait_test) == NULL ||
+ CU_add_test(suite, "bdev_io_spans_boundary", bdev_io_spans_boundary_test) == NULL ||
+ CU_add_test(suite, "bdev_io_split", bdev_io_split) == NULL ||
+ CU_add_test(suite, "bdev_io_split_with_io_wait", bdev_io_split_with_io_wait) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ spdk_allocate_thread(_bdev_send_msg, NULL, NULL, NULL, "thread0");
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ spdk_free_thread();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/bdev/bdev_raid.c/.gitignore b/src/spdk/test/unit/lib/bdev/bdev_raid.c/.gitignore
new file mode 100644
index 00000000..98d1a166
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/bdev_raid.c/.gitignore
@@ -0,0 +1 @@
+bdev_raid_ut
diff --git a/src/spdk/test/unit/lib/bdev/bdev_raid.c/Makefile b/src/spdk/test/unit/lib/bdev/bdev_raid.c/Makefile
new file mode 100644
index 00000000..9739cb44
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/bdev_raid.c/Makefile
@@ -0,0 +1,40 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+
+TEST_FILE = bdev_raid_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/bdev/bdev_raid.c/bdev_raid_ut.c b/src/spdk/test/unit/lib/bdev/bdev_raid.c/bdev_raid_ut.c
new file mode 100644
index 00000000..ffa466da
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/bdev_raid.c/bdev_raid_ut.c
@@ -0,0 +1,2236 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+#include "spdk_cunit.h"
+#include "spdk/env.h"
+#include "spdk_internal/mock.h"
+#include "bdev/raid/bdev_raid.c"
+#include "bdev/raid/bdev_raid_rpc.c"
+
+#define MAX_BASE_DRIVES 255
+#define MAX_RAIDS 31
+#define INVALID_IO_SUBMIT 0xFFFF
+
+/* Data structure to capture the output of IO for verification */
+struct io_output {
+ struct spdk_bdev_desc *desc;
+ struct spdk_io_channel *ch;
+ uint64_t offset_blocks;
+ uint64_t num_blocks;
+ spdk_bdev_io_completion_cb cb;
+ void *cb_arg;
+ enum spdk_bdev_io_type iotype;
+};
+
+/* Different test options, more options to test can be added here */
+uint32_t g_blklen_opts[] = {512, 4096};
+uint32_t g_strip_opts[] = {64, 128, 256, 512, 1024, 2048};
+uint32_t g_iosize_opts[] = {256, 512, 1024};
+uint32_t g_max_qd_opts[] = {64, 128, 256, 512, 1024, 2048};
+
+/* Globals */
+int g_bdev_io_submit_status;
+struct io_output *g_io_output = NULL;
+uint32_t g_io_output_index;
+uint32_t g_io_comp_status;
+bool g_child_io_status_flag;
+void *rpc_req;
+uint32_t rpc_req_size;
+TAILQ_HEAD(bdev, spdk_bdev);
+struct bdev g_bdev_list;
+TAILQ_HEAD(waitq, spdk_bdev_io_wait_entry);
+struct waitq g_io_waitq;
+uint32_t g_block_len;
+uint32_t g_strip_size;
+uint32_t g_max_io_size;
+uint32_t g_max_qd;
+uint8_t g_max_base_drives;
+uint8_t g_max_raids;
+uint8_t g_ignore_io_output;
+uint8_t g_rpc_err;
+char *g_get_raids_output[MAX_RAIDS];
+uint32_t g_get_raids_count;
+uint8_t g_json_beg_res_ret_err;
+uint8_t g_json_decode_obj_err;
+uint8_t g_json_decode_obj_construct;
+uint8_t g_config_level_create = 0;
+uint8_t g_test_multi_raids;
+
+/* Set randomly test options, in every run it is different */
+static void
+set_test_opts(void)
+{
+ uint32_t seed = time(0);
+
+ /* Generate random test options */
+ srand(seed);
+ g_max_base_drives = (rand() % MAX_BASE_DRIVES) + 1;
+ g_max_raids = (rand() % MAX_RAIDS) + 1;
+ g_block_len = g_blklen_opts[rand() % SPDK_COUNTOF(g_blklen_opts)];
+ g_strip_size = g_strip_opts[rand() % SPDK_COUNTOF(g_strip_opts)];
+ g_max_io_size = g_iosize_opts[rand() % SPDK_COUNTOF(g_iosize_opts)];
+ g_max_qd = g_max_qd_opts[rand() % SPDK_COUNTOF(g_max_qd_opts)];
+
+ printf("Test Options, seed = %u\n", seed);
+ printf("blocklen = %u, strip_size = %u, max_io_size = %u, max_qd = %u, g_max_base_drives = %u, g_max_raids = %u\n",
+ g_block_len, g_strip_size, g_max_io_size, g_max_qd, g_max_base_drives, g_max_raids);
+}
+
+/* Set globals before every test run */
+static void
+set_globals(void)
+{
+ uint32_t max_splits;
+
+ g_bdev_io_submit_status = 0;
+ if (g_max_io_size < g_strip_size) {
+ max_splits = 2;
+ } else {
+ max_splits = (g_max_io_size / g_strip_size) + 1;
+ }
+ g_io_output = calloc(max_splits, sizeof(struct io_output));
+ SPDK_CU_ASSERT_FATAL(g_io_output != NULL);
+ g_io_output_index = 0;
+ memset(g_get_raids_output, 0, sizeof(g_get_raids_output));
+ g_get_raids_count = 0;
+ g_io_comp_status = 0;
+ g_ignore_io_output = 0;
+ g_config_level_create = 0;
+ g_rpc_err = 0;
+ g_test_multi_raids = 0;
+ g_child_io_status_flag = true;
+ TAILQ_INIT(&g_bdev_list);
+ TAILQ_INIT(&g_io_waitq);
+ rpc_req = NULL;
+ rpc_req_size = 0;
+ g_json_beg_res_ret_err = 0;
+ g_json_decode_obj_err = 0;
+ g_json_decode_obj_construct = 0;
+}
+
+static void
+base_bdevs_cleanup(void)
+{
+ struct spdk_bdev *bdev;
+ struct spdk_bdev *bdev_next;
+
+ if (!TAILQ_EMPTY(&g_bdev_list)) {
+ TAILQ_FOREACH_SAFE(bdev, &g_bdev_list, internal.link, bdev_next) {
+ free(bdev->name);
+ TAILQ_REMOVE(&g_bdev_list, bdev, internal.link);
+ free(bdev);
+ }
+ }
+}
+
+static void
+check_and_remove_raid_bdev(struct raid_bdev_config *raid_cfg)
+{
+ struct raid_bdev *raid_bdev;
+
+ /* Get the raid structured allocated if exists */
+ raid_bdev = raid_cfg->raid_bdev;
+ if (raid_bdev == NULL) {
+ return;
+ }
+
+ for (uint32_t i = 0; i < raid_bdev->num_base_bdevs; i++) {
+ assert(raid_bdev->base_bdev_info != NULL);
+ if (raid_bdev->base_bdev_info[i].bdev) {
+ raid_bdev_free_base_bdev_resource(raid_bdev, i);
+ }
+ }
+ assert(raid_bdev->num_base_bdevs_discovered == 0);
+ raid_bdev_cleanup(raid_bdev);
+}
+
+/* Reset globals */
+static void
+reset_globals(void)
+{
+ if (g_io_output) {
+ free(g_io_output);
+ g_io_output = NULL;
+ }
+ rpc_req = NULL;
+ rpc_req_size = 0;
+}
+
+void
+spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb,
+ uint64_t len)
+{
+ CU_ASSERT(false);
+}
+
+/* Store the IO completion status in global variable to verify by various tests */
+void
+spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
+{
+ g_io_comp_status = ((status == SPDK_BDEV_IO_STATUS_SUCCESS) ? true : false);
+}
+
+/* It will cache the split IOs for verification */
+int
+spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *iov, int iovcnt,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ struct io_output *p = &g_io_output[g_io_output_index];
+ struct spdk_bdev_io *child_io;
+
+ if (g_ignore_io_output) {
+ return 0;
+ }
+
+ if (g_max_io_size < g_strip_size) {
+ SPDK_CU_ASSERT_FATAL(g_io_output_index < 2);
+ } else {
+ SPDK_CU_ASSERT_FATAL(g_io_output_index < (g_max_io_size / g_strip_size) + 1);
+ }
+ if (g_bdev_io_submit_status == 0) {
+ p->desc = desc;
+ p->ch = ch;
+ p->offset_blocks = offset_blocks;
+ p->num_blocks = num_blocks;
+ p->cb = cb;
+ p->cb_arg = cb_arg;
+ p->iotype = SPDK_BDEV_IO_TYPE_WRITE;
+ g_io_output_index++;
+ child_io = calloc(1, sizeof(struct spdk_bdev_io));
+ SPDK_CU_ASSERT_FATAL(child_io != NULL);
+ cb(child_io, g_child_io_status_flag, cb_arg);
+ }
+
+ return g_bdev_io_submit_status;
+}
+
+int
+spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ return 0;
+}
+
+void
+spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
+{
+ bdev->fn_table->destruct(bdev->ctxt);
+}
+
+int
+spdk_bdev_open(struct spdk_bdev *bdev, bool write, spdk_bdev_remove_cb_t remove_cb,
+ void *remove_ctx, struct spdk_bdev_desc **_desc)
+{
+ *_desc = (void *)0x1;
+ return 0;
+}
+
+void
+spdk_put_io_channel(struct spdk_io_channel *ch)
+{
+ CU_ASSERT(ch == (void *)1);
+}
+
+struct spdk_io_channel *
+spdk_get_io_channel(void *io_device)
+{
+ return NULL;
+}
+
+void
+spdk_poller_unregister(struct spdk_poller **ppoller)
+{
+}
+
+struct spdk_poller *
+spdk_poller_register(spdk_poller_fn fn,
+ void *arg,
+ uint64_t period_microseconds)
+{
+ return (void *)1;
+}
+
+void
+spdk_io_device_unregister(void *io_device, spdk_io_device_unregister_cb unregister_cb)
+{
+}
+
+char *
+spdk_sprintf_alloc(const char *format, ...)
+{
+ return strdup(format);
+}
+
+void
+spdk_io_device_register(void *io_device, spdk_io_channel_create_cb create_cb,
+ spdk_io_channel_destroy_cb destroy_cb, uint32_t ctx_size,
+ const char *name)
+{
+}
+
+int
+spdk_json_write_name(struct spdk_json_write_ctx *w, const char *name)
+{
+ return 0;
+}
+
+int spdk_json_write_named_uint32(struct spdk_json_write_ctx *w, const char *name, uint32_t val)
+{
+ struct rpc_construct_raid_bdev *req = rpc_req;
+ if (strcmp(name, "strip_size") == 0) {
+ CU_ASSERT(req->strip_size * 1024 / g_block_len == val);
+ } else if (strcmp(name, "blocklen_shift") == 0) {
+ CU_ASSERT(spdk_u32log2(g_block_len) == val);
+ } else if (strcmp(name, "raid_level") == 0) {
+ CU_ASSERT(req->raid_level == val);
+ } else if (strcmp(name, "num_base_bdevs") == 0) {
+ CU_ASSERT(req->base_bdevs.num_base_bdevs == val);
+ } else if (strcmp(name, "state") == 0) {
+ CU_ASSERT(val == RAID_BDEV_STATE_ONLINE);
+ } else if (strcmp(name, "destruct_called") == 0) {
+ CU_ASSERT(val == 0);
+ } else if (strcmp(name, "num_base_bdevs_discovered") == 0) {
+ CU_ASSERT(req->base_bdevs.num_base_bdevs == val);
+ }
+ return 0;
+}
+
+int spdk_json_write_named_string(struct spdk_json_write_ctx *w, const char *name, const char *val)
+{
+ return 0;
+}
+
+int
+spdk_json_write_object_begin(struct spdk_json_write_ctx *w)
+{
+ return 0;
+}
+
+int
+spdk_json_write_named_object_begin(struct spdk_json_write_ctx *w, const char *name)
+{
+ return 0;
+}
+
+int
+spdk_json_write_named_array_begin(struct spdk_json_write_ctx *w, const char *name)
+{
+ return 0;
+}
+
+int
+spdk_json_write_array_end(struct spdk_json_write_ctx *w)
+{
+ return 0;
+}
+
+int
+spdk_json_write_object_end(struct spdk_json_write_ctx *w)
+{
+ return 0;
+}
+
+int
+spdk_json_write_bool(struct spdk_json_write_ctx *w, bool val)
+{
+ return 0;
+}
+
+int spdk_json_write_null(struct spdk_json_write_ctx *w)
+{
+ return 0;
+}
+
+struct spdk_io_channel *
+spdk_bdev_get_io_channel(struct spdk_bdev_desc *desc)
+{
+ return (void *)1;
+}
+
+void
+spdk_for_each_thread(spdk_thread_fn fn, void *ctx, spdk_thread_fn cpl)
+{
+ fn(ctx);
+ cpl(ctx);
+}
+
+struct spdk_thread *
+spdk_get_thread(void)
+{
+ return NULL;
+}
+
+void
+spdk_thread_send_msg(const struct spdk_thread *thread, spdk_thread_fn fn, void *ctx)
+{
+ fn(ctx);
+}
+
+uint32_t
+spdk_env_get_current_core(void)
+{
+ return 0;
+}
+
+void
+spdk_bdev_free_io(struct spdk_bdev_io *bdev_io)
+{
+ if (bdev_io) {
+ free(bdev_io);
+ }
+}
+
+/* It will cache split IOs for verification */
+int
+spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *iov, int iovcnt,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ struct io_output *p = &g_io_output[g_io_output_index];
+ struct spdk_bdev_io *child_io;
+
+ if (g_ignore_io_output) {
+ return 0;
+ }
+
+ SPDK_CU_ASSERT_FATAL(g_io_output_index <= (g_max_io_size / g_strip_size) + 1);
+ if (g_bdev_io_submit_status == 0) {
+ p->desc = desc;
+ p->ch = ch;
+ p->offset_blocks = offset_blocks;
+ p->num_blocks = num_blocks;
+ p->cb = cb;
+ p->cb_arg = cb_arg;
+ p->iotype = SPDK_BDEV_IO_TYPE_READ;
+ g_io_output_index++;
+ child_io = calloc(1, sizeof(struct spdk_bdev_io));
+ SPDK_CU_ASSERT_FATAL(child_io != NULL);
+ cb(child_io, g_child_io_status_flag, cb_arg);
+ }
+
+ return g_bdev_io_submit_status;
+}
+
+void
+spdk_bdev_module_release_bdev(struct spdk_bdev *bdev)
+{
+ CU_ASSERT(bdev->internal.claim_module != NULL);
+ bdev->internal.claim_module = NULL;
+}
+
+void
+spdk_bdev_module_examine_done(struct spdk_bdev_module *module)
+{
+}
+
+struct spdk_conf_section *
+spdk_conf_first_section(struct spdk_conf *cp)
+{
+ if (g_config_level_create) {
+ return (void *) 0x1;
+ }
+
+ return NULL;
+}
+
+bool
+spdk_conf_section_match_prefix(const struct spdk_conf_section *sp, const char *name_prefix)
+{
+ if (g_config_level_create) {
+ return true;
+ }
+
+ return false;
+}
+
+char *
+spdk_conf_section_get_val(struct spdk_conf_section *sp, const char *key)
+{
+ struct rpc_construct_raid_bdev *req = rpc_req;
+
+ if (g_config_level_create) {
+ if (strcmp(key, "Name") == 0) {
+ return req->name;
+ }
+ }
+
+ return NULL;
+}
+
+int
+spdk_conf_section_get_intval(struct spdk_conf_section *sp, const char *key)
+{
+ struct rpc_construct_raid_bdev *req = rpc_req;
+
+ if (g_config_level_create) {
+ if (strcmp(key, "StripSize") == 0) {
+ return req->strip_size;
+ } else if (strcmp(key, "NumDevices") == 0) {
+ return req->base_bdevs.num_base_bdevs;
+ } else if (strcmp(key, "RaidLevel") == 0) {
+ return req->raid_level;
+ }
+ }
+
+ return 0;
+}
+
+struct spdk_conf_section *
+spdk_conf_next_section(struct spdk_conf_section *sp)
+{
+ return NULL;
+}
+
+char *
+spdk_conf_section_get_nmval(struct spdk_conf_section *sp, const char *key, int idx1, int idx2)
+{
+ struct rpc_construct_raid_bdev *req = rpc_req;
+
+ if (g_config_level_create) {
+ if (strcmp(key, "Devices") == 0) {
+ if (idx2 >= g_max_base_drives) {
+ return NULL;
+ }
+ return req->base_bdevs.base_bdevs[idx2];
+ }
+ }
+
+ return NULL;
+}
+
+void
+spdk_bdev_close(struct spdk_bdev_desc *desc)
+{
+}
+
+int
+spdk_bdev_module_claim_bdev(struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
+ struct spdk_bdev_module *module)
+{
+ if (bdev->internal.claim_module != NULL) {
+ return -1;
+ }
+ bdev->internal.claim_module = module;
+ return 0;
+}
+
+int
+spdk_bdev_register(struct spdk_bdev *bdev)
+{
+ return 0;
+}
+
+uint32_t
+spdk_env_get_last_core(void)
+{
+ return 0;
+}
+
+int
+spdk_json_decode_string(const struct spdk_json_val *val, void *out)
+{
+ return 0;
+}
+
+int
+spdk_json_decode_object(const struct spdk_json_val *values,
+ const struct spdk_json_object_decoder *decoders, size_t num_decoders, void *out)
+{
+ struct rpc_construct_raid_bdev *req, *_out;
+ size_t i;
+
+ if (g_json_decode_obj_err) {
+ return -1;
+ } else if (g_json_decode_obj_construct) {
+ req = rpc_req;
+ _out = out;
+
+ _out->name = strdup(req->name);
+ SPDK_CU_ASSERT_FATAL(_out->name != NULL);
+ _out->strip_size = req->strip_size;
+ _out->raid_level = req->raid_level;
+ _out->base_bdevs.num_base_bdevs = req->base_bdevs.num_base_bdevs;
+ for (i = 0; i < req->base_bdevs.num_base_bdevs; i++) {
+ _out->base_bdevs.base_bdevs[i] = strdup(req->base_bdevs.base_bdevs[i]);
+ SPDK_CU_ASSERT_FATAL(_out->base_bdevs.base_bdevs[i]);
+ }
+ } else {
+ memcpy(out, rpc_req, rpc_req_size);
+ }
+
+ return 0;
+}
+
+struct spdk_json_write_ctx *
+spdk_jsonrpc_begin_result(struct spdk_jsonrpc_request *request)
+{
+ if (g_json_beg_res_ret_err) {
+ return NULL;
+ } else {
+ return (void *)1;
+ }
+}
+
+int
+spdk_json_write_array_begin(struct spdk_json_write_ctx *w)
+{
+ return 0;
+}
+
+int
+spdk_json_write_string(struct spdk_json_write_ctx *w, const char *val)
+{
+ if (g_test_multi_raids) {
+ g_get_raids_output[g_get_raids_count] = strdup(val);
+ SPDK_CU_ASSERT_FATAL(g_get_raids_output[g_get_raids_count] != NULL);
+ g_get_raids_count++;
+ }
+
+ return 0;
+}
+
+void
+spdk_jsonrpc_send_error_response(struct spdk_jsonrpc_request *request,
+ int error_code, const char *msg)
+{
+ g_rpc_err = 1;
+}
+
+void
+spdk_jsonrpc_send_error_response_fmt(struct spdk_jsonrpc_request *request,
+ int error_code, const char *fmt, ...)
+{
+ g_rpc_err = 1;
+}
+
+void
+spdk_jsonrpc_end_result(struct spdk_jsonrpc_request *request, struct spdk_json_write_ctx *w)
+{
+}
+
+struct spdk_bdev *
+spdk_bdev_get_by_name(const char *bdev_name)
+{
+ struct spdk_bdev *bdev;
+
+ if (!TAILQ_EMPTY(&g_bdev_list)) {
+ TAILQ_FOREACH(bdev, &g_bdev_list, internal.link) {
+ if (strcmp(bdev_name, bdev->name) == 0) {
+ return bdev;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+const char *
+spdk_strerror(int errnum)
+{
+ return NULL;
+}
+
+int
+spdk_json_decode_array(const struct spdk_json_val *values, spdk_json_decode_fn decode_func,
+ void *out, size_t max_size, size_t *out_size, size_t stride)
+{
+ return 0;
+}
+
+void
+spdk_rpc_register_method(const char *method, spdk_rpc_method_handler func, uint32_t state_mask)
+{
+}
+
+int
+spdk_json_decode_uint32(const struct spdk_json_val *val, void *out)
+{
+ return 0;
+}
+
+
+void
+spdk_bdev_module_list_add(struct spdk_bdev_module *bdev_module)
+{
+}
+
+static void
+bdev_io_cleanup(struct spdk_bdev_io *bdev_io)
+{
+ if (bdev_io->u.bdev.iovs) {
+ if (bdev_io->u.bdev.iovs->iov_base) {
+ free(bdev_io->u.bdev.iovs->iov_base);
+ bdev_io->u.bdev.iovs->iov_base = NULL;
+ }
+ free(bdev_io->u.bdev.iovs);
+ bdev_io->u.bdev.iovs = NULL;
+ }
+}
+
+static void
+bdev_io_initialize(struct spdk_bdev_io *bdev_io, struct spdk_bdev *bdev,
+ uint64_t lba, uint64_t blocks, int16_t iotype)
+{
+ bdev_io->bdev = bdev;
+ bdev_io->u.bdev.offset_blocks = lba;
+ bdev_io->u.bdev.num_blocks = blocks;
+ bdev_io->type = iotype;
+ bdev_io->u.bdev.iovcnt = 1;
+ bdev_io->u.bdev.iovs = calloc(1, sizeof(struct iovec));
+ SPDK_CU_ASSERT_FATAL(bdev_io->u.bdev.iovs != NULL);
+ bdev_io->u.bdev.iovs->iov_base = calloc(1, bdev_io->u.bdev.num_blocks * g_block_len);
+ SPDK_CU_ASSERT_FATAL(bdev_io->u.bdev.iovs->iov_base != NULL);
+ bdev_io->u.bdev.iovs->iov_len = bdev_io->u.bdev.num_blocks * g_block_len;
+ bdev_io->u.bdev.iovs = bdev_io->u.bdev.iovs;
+}
+
+static void
+verify_io(struct spdk_bdev_io *bdev_io, uint8_t num_base_drives,
+ struct raid_bdev_io_channel *ch_ctx, struct raid_bdev *raid_bdev, uint32_t io_status)
+{
+ uint32_t strip_shift = spdk_u32log2(g_strip_size);
+ uint64_t start_strip = bdev_io->u.bdev.offset_blocks >> strip_shift;
+ uint64_t end_strip = (bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) >>
+ strip_shift;
+ uint32_t splits_reqd = (end_strip - start_strip + 1);
+ uint32_t strip;
+ uint64_t pd_strip;
+ uint64_t pd_idx;
+ uint32_t offset_in_strip;
+ uint64_t pd_lba;
+ uint64_t pd_blocks;
+ uint32_t index = 0;
+ uint8_t *buf = bdev_io->u.bdev.iovs->iov_base;
+
+ if (io_status == INVALID_IO_SUBMIT) {
+ CU_ASSERT(g_io_comp_status == false);
+ return;
+ }
+ SPDK_CU_ASSERT_FATAL(raid_bdev != NULL);
+ SPDK_CU_ASSERT_FATAL(num_base_drives != 0);
+
+ CU_ASSERT(splits_reqd == g_io_output_index);
+ for (strip = start_strip; strip <= end_strip; strip++, index++) {
+ pd_strip = strip / num_base_drives;
+ pd_idx = strip % num_base_drives;
+ if (strip == start_strip) {
+ offset_in_strip = bdev_io->u.bdev.offset_blocks & (g_strip_size - 1);
+ pd_lba = (pd_strip << strip_shift) + offset_in_strip;
+ if (strip == end_strip) {
+ pd_blocks = bdev_io->u.bdev.num_blocks;
+ } else {
+ pd_blocks = g_strip_size - offset_in_strip;
+ }
+ } else if (strip == end_strip) {
+ pd_lba = pd_strip << strip_shift;
+ pd_blocks = ((bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) &
+ (g_strip_size - 1)) + 1;
+ } else {
+ pd_lba = pd_strip << raid_bdev->strip_size_shift;
+ pd_blocks = raid_bdev->strip_size;
+ }
+ CU_ASSERT(pd_lba == g_io_output[index].offset_blocks);
+ CU_ASSERT(pd_blocks == g_io_output[index].num_blocks);
+ CU_ASSERT(ch_ctx->base_channel[pd_idx] == g_io_output[index].ch);
+ CU_ASSERT(raid_bdev->base_bdev_info[pd_idx].desc == g_io_output[index].desc);
+ CU_ASSERT(bdev_io->type == g_io_output[index].iotype);
+ buf += (pd_blocks << spdk_u32log2(g_block_len));
+ }
+ CU_ASSERT(g_io_comp_status == io_status);
+}
+
+static void
+verify_raid_config_present(const char *name, bool presence)
+{
+ struct raid_bdev_config *raid_cfg;
+ bool cfg_found;
+
+ cfg_found = false;
+
+ TAILQ_FOREACH(raid_cfg, &g_spdk_raid_config.raid_bdev_config_head, link) {
+ if (raid_cfg->name != NULL) {
+ if (strcmp(name, raid_cfg->name) == 0) {
+ cfg_found = true;
+ break;
+ }
+ }
+ }
+
+ if (presence == true) {
+ CU_ASSERT(cfg_found == true);
+ } else {
+ CU_ASSERT(cfg_found == false);
+ }
+}
+
+static void
+verify_raid_bdev_present(const char *name, bool presence)
+{
+ struct raid_bdev *pbdev;
+ bool pbdev_found;
+
+ pbdev_found = false;
+ TAILQ_FOREACH(pbdev, &g_spdk_raid_bdev_list, global_link) {
+ if (strcmp(pbdev->bdev.name, name) == 0) {
+ pbdev_found = true;
+ break;
+ }
+ }
+ if (presence == true) {
+ CU_ASSERT(pbdev_found == true);
+ } else {
+ CU_ASSERT(pbdev_found == false);
+ }
+}
+static void
+verify_raid_config(struct rpc_construct_raid_bdev *r, bool presence)
+{
+ struct raid_bdev_config *raid_cfg = NULL;
+ uint32_t i;
+ int val;
+
+ TAILQ_FOREACH(raid_cfg, &g_spdk_raid_config.raid_bdev_config_head, link) {
+ if (strcmp(r->name, raid_cfg->name) == 0) {
+ if (presence == false) {
+ break;
+ }
+ CU_ASSERT(raid_cfg->raid_bdev != NULL);
+ CU_ASSERT(raid_cfg->strip_size == r->strip_size);
+ CU_ASSERT(raid_cfg->num_base_bdevs == r->base_bdevs.num_base_bdevs);
+ CU_ASSERT(raid_cfg->raid_level == r->raid_level);
+ if (raid_cfg->base_bdev != NULL) {
+ for (i = 0; i < raid_cfg->num_base_bdevs; i++) {
+ val = strcmp(raid_cfg->base_bdev[i].name, r->base_bdevs.base_bdevs[i]);
+ CU_ASSERT(val == 0);
+ }
+ }
+ break;
+ }
+ }
+
+ if (presence == true) {
+ CU_ASSERT(raid_cfg != NULL);
+ } else {
+ CU_ASSERT(raid_cfg == NULL);
+ }
+}
+
+static void
+verify_raid_bdev(struct rpc_construct_raid_bdev *r, bool presence, uint32_t raid_state)
+{
+ struct raid_bdev *pbdev;
+ uint32_t i;
+ struct spdk_bdev *bdev = NULL;
+ bool pbdev_found;
+ uint64_t min_blockcnt = 0xFFFFFFFFFFFFFFFF;
+
+ pbdev_found = false;
+ TAILQ_FOREACH(pbdev, &g_spdk_raid_bdev_list, global_link) {
+ if (strcmp(pbdev->bdev.name, r->name) == 0) {
+ pbdev_found = true;
+ if (presence == false) {
+ break;
+ }
+ CU_ASSERT(pbdev->config->raid_bdev == pbdev);
+ CU_ASSERT(pbdev->base_bdev_info != NULL);
+ CU_ASSERT(pbdev->strip_size == ((r->strip_size * 1024) / g_block_len));
+ CU_ASSERT(pbdev->strip_size_shift == spdk_u32log2(((r->strip_size * 1024) / g_block_len)));
+ CU_ASSERT(pbdev->blocklen_shift == spdk_u32log2(g_block_len));
+ CU_ASSERT(pbdev->state == raid_state);
+ CU_ASSERT(pbdev->num_base_bdevs == r->base_bdevs.num_base_bdevs);
+ CU_ASSERT(pbdev->num_base_bdevs_discovered == r->base_bdevs.num_base_bdevs);
+ CU_ASSERT(pbdev->raid_level == r->raid_level);
+ CU_ASSERT(pbdev->destruct_called == false);
+ for (i = 0; i < pbdev->num_base_bdevs; i++) {
+ if (pbdev->base_bdev_info && pbdev->base_bdev_info[i].bdev) {
+ bdev = spdk_bdev_get_by_name(pbdev->base_bdev_info[i].bdev->name);
+ CU_ASSERT(bdev != NULL);
+ CU_ASSERT(pbdev->base_bdev_info[i].remove_scheduled == false);
+ } else {
+ CU_ASSERT(0);
+ }
+
+ if (bdev && bdev->blockcnt < min_blockcnt) {
+ min_blockcnt = bdev->blockcnt;
+ }
+ }
+ CU_ASSERT((((min_blockcnt / (r->strip_size * 1024 / g_block_len)) * (r->strip_size * 1024 /
+ g_block_len)) * r->base_bdevs.num_base_bdevs) == pbdev->bdev.blockcnt);
+ CU_ASSERT(strcmp(pbdev->bdev.product_name, "Pooled Device") == 0);
+ CU_ASSERT(pbdev->bdev.write_cache == 0);
+ CU_ASSERT(pbdev->bdev.blocklen == g_block_len);
+ if (pbdev->num_base_bdevs > 1) {
+ CU_ASSERT(pbdev->bdev.optimal_io_boundary == pbdev->strip_size);
+ CU_ASSERT(pbdev->bdev.split_on_optimal_io_boundary == true);
+ } else {
+ CU_ASSERT(pbdev->bdev.optimal_io_boundary == 0);
+ CU_ASSERT(pbdev->bdev.split_on_optimal_io_boundary == false);
+ }
+ CU_ASSERT(pbdev->bdev.ctxt == pbdev);
+ CU_ASSERT(pbdev->bdev.fn_table == &g_raid_bdev_fn_table);
+ CU_ASSERT(pbdev->bdev.module == &g_raid_if);
+ break;
+ }
+ }
+ if (presence == true) {
+ CU_ASSERT(pbdev_found == true);
+ } else {
+ CU_ASSERT(pbdev_found == false);
+ }
+ pbdev_found = false;
+ if (raid_state == RAID_BDEV_STATE_ONLINE) {
+ TAILQ_FOREACH(pbdev, &g_spdk_raid_bdev_configured_list, state_link) {
+ if (strcmp(pbdev->bdev.name, r->name) == 0) {
+ pbdev_found = true;
+ break;
+ }
+ }
+ } else if (raid_state == RAID_BDEV_STATE_CONFIGURING) {
+ TAILQ_FOREACH(pbdev, &g_spdk_raid_bdev_configuring_list, state_link) {
+ if (strcmp(pbdev->bdev.name, r->name) == 0) {
+ pbdev_found = true;
+ break;
+ }
+ }
+ } else if (raid_state == RAID_BDEV_STATE_OFFLINE) {
+ TAILQ_FOREACH(pbdev, &g_spdk_raid_bdev_offline_list, state_link) {
+ if (strcmp(pbdev->bdev.name, r->name) == 0) {
+ pbdev_found = true;
+ break;
+ }
+ }
+ }
+ if (presence == true) {
+ CU_ASSERT(pbdev_found == true);
+ } else {
+ CU_ASSERT(pbdev_found == false);
+ }
+}
+
+int
+spdk_bdev_queue_io_wait(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
+ struct spdk_bdev_io_wait_entry *entry)
+{
+ CU_ASSERT(bdev == entry->bdev);
+ CU_ASSERT(entry->cb_fn != NULL);
+ CU_ASSERT(entry->cb_arg != NULL);
+ TAILQ_INSERT_TAIL(&g_io_waitq, entry, link);
+ return 0;
+}
+
+
+static uint32_t
+get_num_elts_in_waitq(void)
+{
+ struct spdk_bdev_io_wait_entry *ele;
+ uint32_t count = 0;
+
+ TAILQ_FOREACH(ele, &g_io_waitq, link) {
+ count++;
+ }
+
+ return count;
+}
+
+static void
+process_io_waitq(void)
+{
+ struct spdk_bdev_io_wait_entry *ele;
+ struct spdk_bdev_io_wait_entry *next_ele;
+
+ TAILQ_FOREACH_SAFE(ele, &g_io_waitq, link, next_ele) {
+ TAILQ_REMOVE(&g_io_waitq, ele, link);
+ ele->cb_fn(ele->cb_arg);
+ }
+}
+
+static void
+verify_get_raids(struct rpc_construct_raid_bdev *construct_req,
+ uint8_t g_max_raids,
+ char **g_get_raids_output, uint32_t g_get_raids_count)
+{
+ uint32_t i, j;
+ bool found;
+
+ CU_ASSERT(g_max_raids == g_get_raids_count);
+ if (g_max_raids == g_get_raids_count) {
+ for (i = 0; i < g_max_raids; i++) {
+ found = false;
+ for (j = 0; j < g_max_raids; j++) {
+ if (construct_req[i].name && strcmp(construct_req[i].name, g_get_raids_output[i]) == 0) {
+ found = true;
+ break;
+ }
+ }
+ CU_ASSERT(found == true);
+ }
+ }
+}
+
+static void
+create_base_bdevs(uint32_t bbdev_start_idx)
+{
+ uint32_t i;
+ struct spdk_bdev *base_bdev;
+ char name[16];
+ uint16_t num_chars;
+
+ for (i = 0; i < g_max_base_drives; i++, bbdev_start_idx++) {
+ num_chars = snprintf(name, 16, "%s%u%s", "Nvme", bbdev_start_idx, "n1");
+ name[num_chars] = '\0';
+ base_bdev = calloc(1, sizeof(struct spdk_bdev));
+ SPDK_CU_ASSERT_FATAL(base_bdev != NULL);
+ base_bdev->name = strdup(name);
+ SPDK_CU_ASSERT_FATAL(base_bdev->name != NULL);
+ base_bdev->blocklen = g_block_len;
+ base_bdev->blockcnt = (uint64_t)1024 * 1024 * 1024 * 1024;
+ TAILQ_INSERT_TAIL(&g_bdev_list, base_bdev, internal.link);
+ }
+}
+
+static void
+create_test_req(struct rpc_construct_raid_bdev *r, const char *raid_name, uint32_t bbdev_start_idx,
+ bool create_base_bdev)
+{
+ uint32_t i;
+ char name[16];
+ uint16_t num_chars;
+ uint32_t bbdev_idx = bbdev_start_idx;
+
+ r->name = strdup(raid_name);
+ SPDK_CU_ASSERT_FATAL(r->name != NULL);
+ r->strip_size = (g_strip_size * g_block_len) / 1024;
+ r->raid_level = 0;
+ r->base_bdevs.num_base_bdevs = g_max_base_drives;
+ for (i = 0; i < g_max_base_drives; i++, bbdev_idx++) {
+ num_chars = snprintf(name, 16, "%s%u%s", "Nvme", bbdev_idx, "n1");
+ name[num_chars] = '\0';
+ r->base_bdevs.base_bdevs[i] = strdup(name);
+ SPDK_CU_ASSERT_FATAL(r->base_bdevs.base_bdevs[i] != NULL);
+ }
+ if (create_base_bdev == true) {
+ create_base_bdevs(bbdev_start_idx);
+ }
+}
+
+static void
+free_test_req(struct rpc_construct_raid_bdev *r)
+{
+ uint8_t i;
+
+ free(r->name);
+ for (i = 0; i < r->base_bdevs.num_base_bdevs; i++) {
+ free(r->base_bdevs.base_bdevs[i]);
+ }
+}
+
+static void
+test_construct_raid(void)
+{
+ struct rpc_construct_raid_bdev req;
+ struct rpc_destroy_raid_bdev destroy_req;
+
+ set_globals();
+ create_test_req(&req, "raid1", 0, true);
+ rpc_req = &req;
+ rpc_req_size = sizeof(req);
+ CU_ASSERT(raid_bdev_init() == 0);
+
+ verify_raid_config_present(req.name, false);
+ verify_raid_bdev_present(req.name, false);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 1;
+ spdk_rpc_construct_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config(&req, true);
+ verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
+ free_test_req(&req);
+
+ destroy_req.name = strdup("raid1");
+ rpc_req = &destroy_req;
+ rpc_req_size = sizeof(destroy_req);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 0;
+ spdk_rpc_destroy_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ raid_bdev_exit();
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+static void
+test_destroy_raid(void)
+{
+ struct rpc_construct_raid_bdev construct_req;
+ struct rpc_destroy_raid_bdev destroy_req;
+
+ set_globals();
+ create_test_req(&construct_req, "raid1", 0, true);
+ rpc_req = &construct_req;
+ rpc_req_size = sizeof(construct_req);
+ CU_ASSERT(raid_bdev_init() == 0);
+ verify_raid_config_present(construct_req.name, false);
+ verify_raid_bdev_present(construct_req.name, false);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 1;
+ spdk_rpc_construct_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config(&construct_req, true);
+ verify_raid_bdev(&construct_req, true, RAID_BDEV_STATE_ONLINE);
+ free_test_req(&construct_req);
+
+ destroy_req.name = strdup("raid1");
+ rpc_req = &destroy_req;
+ rpc_req_size = sizeof(destroy_req);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 0;
+ spdk_rpc_destroy_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ raid_bdev_exit();
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+static void
+test_construct_raid_invalid_args(void)
+{
+ struct rpc_construct_raid_bdev req;
+ struct rpc_destroy_raid_bdev destroy_req;
+ struct raid_bdev_config *raid_cfg;
+
+ set_globals();
+ rpc_req = &req;
+ rpc_req_size = sizeof(req);
+ CU_ASSERT(raid_bdev_init() == 0);
+
+ create_test_req(&req, "raid1", 0, true);
+ verify_raid_config_present(req.name, false);
+ verify_raid_bdev_present(req.name, false);
+ req.raid_level = 1;
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 1;
+ spdk_rpc_construct_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 1);
+ free_test_req(&req);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ create_test_req(&req, "raid1", 0, false);
+ verify_raid_config_present(req.name, false);
+ verify_raid_bdev_present(req.name, false);
+ g_rpc_err = 0;
+ g_json_decode_obj_err = 1;
+ g_json_decode_obj_construct = 1;
+ spdk_rpc_construct_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 1);
+ g_json_decode_obj_err = 0;
+ free_test_req(&req);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ create_test_req(&req, "raid1", 0, false);
+ req.strip_size = 1231;
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 1;
+ spdk_rpc_construct_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 1);
+ free_test_req(&req);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ create_test_req(&req, "raid1", 0, false);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 1;
+ spdk_rpc_construct_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config(&req, true);
+ verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
+ free_test_req(&req);
+
+ create_test_req(&req, "raid1", 0, false);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 1;
+ spdk_rpc_construct_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 1);
+ free_test_req(&req);
+
+ create_test_req(&req, "raid2", 0, false);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 1;
+ spdk_rpc_construct_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 1);
+ free_test_req(&req);
+ verify_raid_config_present("raid2", false);
+ verify_raid_bdev_present("raid2", false);
+
+ create_test_req(&req, "raid2", g_max_base_drives, true);
+ free(req.base_bdevs.base_bdevs[g_max_base_drives - 1]);
+ req.base_bdevs.base_bdevs[g_max_base_drives - 1] = strdup("Nvme0n1");
+ SPDK_CU_ASSERT_FATAL(req.base_bdevs.base_bdevs[g_max_base_drives - 1] != NULL);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 1;
+ spdk_rpc_construct_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 1);
+ free_test_req(&req);
+ verify_raid_config_present("raid2", false);
+ verify_raid_bdev_present("raid2", false);
+
+ create_test_req(&req, "raid2", g_max_base_drives, true);
+ free(req.base_bdevs.base_bdevs[g_max_base_drives - 1]);
+ req.base_bdevs.base_bdevs[g_max_base_drives - 1] = strdup("Nvme100000n1");
+ SPDK_CU_ASSERT_FATAL(req.base_bdevs.base_bdevs[g_max_base_drives - 1] != NULL);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 1;
+ spdk_rpc_construct_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ free_test_req(&req);
+ verify_raid_config_present("raid2", true);
+ verify_raid_bdev_present("raid2", true);
+ raid_cfg = raid_bdev_config_find_by_name("raid2");
+ SPDK_CU_ASSERT_FATAL(raid_cfg != NULL);
+ check_and_remove_raid_bdev(raid_cfg);
+ raid_bdev_config_cleanup(raid_cfg);
+
+ create_test_req(&req, "raid2", g_max_base_drives, false);
+ g_rpc_err = 0;
+ g_json_beg_res_ret_err = 1;
+ g_json_decode_obj_construct = 1;
+ spdk_rpc_construct_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ free_test_req(&req);
+ verify_raid_config_present("raid2", true);
+ verify_raid_bdev_present("raid2", true);
+ verify_raid_config_present("raid1", true);
+ verify_raid_bdev_present("raid1", true);
+ g_json_beg_res_ret_err = 0;
+
+ destroy_req.name = strdup("raid1");
+ rpc_req = &destroy_req;
+ rpc_req_size = sizeof(destroy_req);
+ g_json_decode_obj_construct = 0;
+ spdk_rpc_destroy_raid_bdev(NULL, NULL);
+ destroy_req.name = strdup("raid2");
+ rpc_req = &destroy_req;
+ rpc_req_size = sizeof(destroy_req);
+ g_json_decode_obj_construct = 0;
+ spdk_rpc_destroy_raid_bdev(NULL, NULL);
+ raid_bdev_exit();
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+static void
+test_destroy_raid_invalid_args(void)
+{
+ struct rpc_construct_raid_bdev construct_req;
+ struct rpc_destroy_raid_bdev destroy_req;
+
+ set_globals();
+ create_test_req(&construct_req, "raid1", 0, true);
+ rpc_req = &construct_req;
+ rpc_req_size = sizeof(construct_req);
+ CU_ASSERT(raid_bdev_init() == 0);
+ verify_raid_config_present(construct_req.name, false);
+ verify_raid_bdev_present(construct_req.name, false);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 1;
+ spdk_rpc_construct_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config(&construct_req, true);
+ verify_raid_bdev(&construct_req, true, RAID_BDEV_STATE_ONLINE);
+ free_test_req(&construct_req);
+
+ destroy_req.name = strdup("raid2");
+ rpc_req = &destroy_req;
+ rpc_req_size = sizeof(destroy_req);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 0;
+ spdk_rpc_destroy_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 1);
+
+ destroy_req.name = strdup("raid1");
+ g_rpc_err = 0;
+ g_json_decode_obj_err = 1;
+ g_json_decode_obj_construct = 0;
+ spdk_rpc_destroy_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 1);
+ g_json_decode_obj_err = 0;
+ g_rpc_err = 0;
+ free(destroy_req.name);
+ verify_raid_config_present("raid1", true);
+ verify_raid_bdev_present("raid1", true);
+
+ destroy_req.name = strdup("raid1");
+ rpc_req = &destroy_req;
+ rpc_req_size = sizeof(destroy_req);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 0;
+ spdk_rpc_destroy_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ raid_bdev_exit();
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+static void
+test_io_channel(void)
+{
+ struct rpc_construct_raid_bdev req;
+ struct rpc_destroy_raid_bdev destroy_req;
+ struct raid_bdev *pbdev;
+ struct raid_bdev_io_channel *ch_ctx;
+ uint32_t i;
+
+ set_globals();
+ create_test_req(&req, "raid1", 0, true);
+ rpc_req = &req;
+ rpc_req_size = sizeof(req);
+ CU_ASSERT(raid_bdev_init() == 0);
+
+ verify_raid_config_present(req.name, false);
+ verify_raid_bdev_present(req.name, false);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 1;
+ spdk_rpc_construct_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config(&req, true);
+ verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
+
+ TAILQ_FOREACH(pbdev, &g_spdk_raid_bdev_list, global_link) {
+ if (strcmp(pbdev->bdev.name, req.name) == 0) {
+ break;
+ }
+ }
+ CU_ASSERT(pbdev != NULL);
+ ch_ctx = calloc(1, sizeof(struct raid_bdev_io_channel));
+ SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
+
+ CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) == 0);
+ for (i = 0; i < req.base_bdevs.num_base_bdevs; i++) {
+ CU_ASSERT(ch_ctx->base_channel && ch_ctx->base_channel[i] == (void *)0x1);
+ }
+ raid_bdev_destroy_cb(pbdev, ch_ctx);
+ CU_ASSERT(ch_ctx->base_channel == NULL);
+ free_test_req(&req);
+
+ destroy_req.name = strdup("raid1");
+ rpc_req = &destroy_req;
+ rpc_req_size = sizeof(destroy_req);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 0;
+ spdk_rpc_destroy_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ free(ch_ctx);
+ raid_bdev_exit();
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+static void
+test_write_io(void)
+{
+ struct rpc_construct_raid_bdev req;
+ struct rpc_destroy_raid_bdev destroy_req;
+ struct raid_bdev *pbdev;
+ struct spdk_io_channel *ch;
+ struct raid_bdev_io_channel *ch_ctx;
+ uint32_t i;
+ struct spdk_bdev_io *bdev_io;
+ uint32_t count;
+ uint64_t io_len;
+ uint64_t lba;
+
+ set_globals();
+ create_test_req(&req, "raid1", 0, true);
+ rpc_req = &req;
+ rpc_req_size = sizeof(req);
+ CU_ASSERT(raid_bdev_init() == 0);
+ verify_raid_config_present(req.name, false);
+ verify_raid_bdev_present(req.name, false);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 1;
+ spdk_rpc_construct_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config(&req, true);
+ verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
+ TAILQ_FOREACH(pbdev, &g_spdk_raid_bdev_list, global_link) {
+ if (strcmp(pbdev->bdev.name, req.name) == 0) {
+ break;
+ }
+ }
+ CU_ASSERT(pbdev != NULL);
+ ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct raid_bdev_io_channel));
+ SPDK_CU_ASSERT_FATAL(ch != NULL);
+ ch_ctx = spdk_io_channel_get_ctx(ch);
+ SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
+
+ CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) == 0);
+ for (i = 0; i < req.base_bdevs.num_base_bdevs; i++) {
+ CU_ASSERT(ch_ctx->base_channel && ch_ctx->base_channel[i] == (void *)0x1);
+ }
+
+ lba = 0;
+ for (count = 0; count < g_max_qd; count++) {
+ bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+ io_len = (rand() % g_strip_size) + 1;
+ bdev_io_initialize(bdev_io, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_WRITE);
+ lba += g_strip_size;
+ memset(g_io_output, 0, (g_max_io_size / g_strip_size) + 1 * sizeof(struct io_output));
+ g_io_output_index = 0;
+ raid_bdev_submit_request(ch, bdev_io);
+ verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
+ g_child_io_status_flag);
+ bdev_io_cleanup(bdev_io);
+ free(bdev_io);
+ }
+ free_test_req(&req);
+
+ raid_bdev_destroy_cb(pbdev, ch_ctx);
+ CU_ASSERT(ch_ctx->base_channel == NULL);
+ free(ch);
+ destroy_req.name = strdup("raid1");
+ rpc_req = &destroy_req;
+ rpc_req_size = sizeof(destroy_req);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 0;
+ spdk_rpc_destroy_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ raid_bdev_exit();
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+static void
+test_read_io(void)
+{
+ struct rpc_construct_raid_bdev req;
+ struct rpc_destroy_raid_bdev destroy_req;
+ struct raid_bdev *pbdev;
+ struct spdk_io_channel *ch;
+ struct raid_bdev_io_channel *ch_ctx;
+ uint32_t i;
+ struct spdk_bdev_io *bdev_io;
+ uint32_t count;
+ uint64_t io_len;
+ uint64_t lba;
+
+ set_globals();
+ create_test_req(&req, "raid1", 0, true);
+ rpc_req = &req;
+ rpc_req_size = sizeof(req);
+ CU_ASSERT(raid_bdev_init() == 0);
+ verify_raid_config_present(req.name, false);
+ verify_raid_bdev_present(req.name, false);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 1;
+ spdk_rpc_construct_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config(&req, true);
+ verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
+ TAILQ_FOREACH(pbdev, &g_spdk_raid_bdev_list, global_link) {
+ if (strcmp(pbdev->bdev.name, req.name) == 0) {
+ break;
+ }
+ }
+ CU_ASSERT(pbdev != NULL);
+ ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct raid_bdev_io_channel));
+ SPDK_CU_ASSERT_FATAL(ch != NULL);
+ ch_ctx = spdk_io_channel_get_ctx(ch);
+ SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
+
+ CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) == 0);
+ for (i = 0; i < req.base_bdevs.num_base_bdevs; i++) {
+ CU_ASSERT(ch_ctx->base_channel && ch_ctx->base_channel[i] == (void *)0x1);
+ }
+ free_test_req(&req);
+
+ lba = 0;
+ for (count = 0; count < g_max_qd; count++) {
+ bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+ io_len = (rand() % g_strip_size) + 1;
+ bdev_io_initialize(bdev_io, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_READ);
+ lba += g_strip_size;
+ memset(g_io_output, 0, (g_max_io_size / g_strip_size) + 1 * sizeof(struct io_output));
+ g_io_output_index = 0;
+ raid_bdev_submit_request(ch, bdev_io);
+ verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
+ g_child_io_status_flag);
+ bdev_io_cleanup(bdev_io);
+ free(bdev_io);
+ }
+
+ raid_bdev_destroy_cb(pbdev, ch_ctx);
+ CU_ASSERT(ch_ctx->base_channel == NULL);
+ free(ch);
+ destroy_req.name = strdup("raid1");
+ rpc_req = &destroy_req;
+ rpc_req_size = sizeof(destroy_req);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 0;
+ spdk_rpc_destroy_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ raid_bdev_exit();
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+/* Test IO failures */
+static void
+test_io_failure(void)
+{
+ struct rpc_construct_raid_bdev req;
+ struct rpc_destroy_raid_bdev destroy_req;
+ struct raid_bdev *pbdev;
+ struct spdk_io_channel *ch;
+ struct raid_bdev_io_channel *ch_ctx;
+ uint32_t i;
+ struct spdk_bdev_io *bdev_io;
+ uint32_t count;
+ uint64_t io_len;
+ uint64_t lba;
+
+ set_globals();
+ create_test_req(&req, "raid1", 0, true);
+ rpc_req = &req;
+ rpc_req_size = sizeof(req);
+ CU_ASSERT(raid_bdev_init() == 0);
+ verify_raid_config_present(req.name, false);
+ verify_raid_bdev_present(req.name, false);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 1;
+ spdk_rpc_construct_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config(&req, true);
+ verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
+ TAILQ_FOREACH(pbdev, &g_spdk_raid_bdev_list, global_link) {
+ if (strcmp(pbdev->bdev.name, req.name) == 0) {
+ break;
+ }
+ }
+ CU_ASSERT(pbdev != NULL);
+ ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct raid_bdev_io_channel));
+ SPDK_CU_ASSERT_FATAL(ch != NULL);
+ ch_ctx = spdk_io_channel_get_ctx(ch);
+ SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
+
+ CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) == 0);
+ for (i = 0; i < req.base_bdevs.num_base_bdevs; i++) {
+ CU_ASSERT(ch_ctx->base_channel && ch_ctx->base_channel[i] == (void *)0x1);
+ }
+ free_test_req(&req);
+
+ lba = 0;
+ for (count = 0; count < 1; count++) {
+ bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+ io_len = (rand() % g_strip_size) + 1;
+ bdev_io_initialize(bdev_io, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_INVALID);
+ lba += g_strip_size;
+ memset(g_io_output, 0, (g_max_io_size / g_strip_size) + 1 * sizeof(struct io_output));
+ g_io_output_index = 0;
+ raid_bdev_submit_request(ch, bdev_io);
+ verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
+ INVALID_IO_SUBMIT);
+ bdev_io_cleanup(bdev_io);
+ free(bdev_io);
+ }
+
+
+ lba = 0;
+ g_child_io_status_flag = false;
+ for (count = 0; count < 1; count++) {
+ bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+ io_len = (rand() % g_strip_size) + 1;
+ bdev_io_initialize(bdev_io, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_WRITE);
+ lba += g_strip_size;
+ memset(g_io_output, 0, (g_max_io_size / g_strip_size) + 1 * sizeof(struct io_output));
+ g_io_output_index = 0;
+ raid_bdev_submit_request(ch, bdev_io);
+ verify_io(bdev_io, req.base_bdevs.num_base_bdevs, ch_ctx, pbdev,
+ g_child_io_status_flag);
+ bdev_io_cleanup(bdev_io);
+ free(bdev_io);
+ }
+
+ raid_bdev_destroy_cb(pbdev, ch_ctx);
+ CU_ASSERT(ch_ctx->base_channel == NULL);
+ free(ch);
+ destroy_req.name = strdup("raid1");
+ rpc_req = &destroy_req;
+ rpc_req_size = sizeof(destroy_req);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 0;
+ spdk_rpc_destroy_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ raid_bdev_exit();
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+/* Test waitq logic */
+static void
+test_io_waitq(void)
+{
+ struct rpc_construct_raid_bdev req;
+ struct rpc_destroy_raid_bdev destroy_req;
+ struct raid_bdev *pbdev;
+ struct spdk_io_channel *ch;
+ struct raid_bdev_io_channel *ch_ctx;
+ uint32_t i;
+ struct spdk_bdev_io *bdev_io;
+ struct spdk_bdev_io *bdev_io_next;
+ uint32_t count;
+ uint64_t io_len;
+ uint64_t lba;
+ TAILQ_HEAD(, spdk_bdev_io) head_io;
+
+ set_globals();
+ create_test_req(&req, "raid1", 0, true);
+ rpc_req = &req;
+ rpc_req_size = sizeof(req);
+ CU_ASSERT(raid_bdev_init() == 0);
+ verify_raid_config_present(req.name, false);
+ verify_raid_bdev_present(req.name, false);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 1;
+ spdk_rpc_construct_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config(&req, true);
+ verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
+ TAILQ_FOREACH(pbdev, &g_spdk_raid_bdev_list, global_link) {
+ if (strcmp(pbdev->bdev.name, req.name) == 0) {
+ break;
+ }
+ }
+ SPDK_CU_ASSERT_FATAL(pbdev != NULL);
+ ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct raid_bdev_io_channel));
+ SPDK_CU_ASSERT_FATAL(ch != NULL);
+ ch_ctx = spdk_io_channel_get_ctx(ch);
+ SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
+
+ CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) == 0);
+ SPDK_CU_ASSERT_FATAL(ch_ctx->base_channel != NULL);
+ for (i = 0; i < req.base_bdevs.num_base_bdevs; i++) {
+ CU_ASSERT(ch_ctx->base_channel[i] == (void *)0x1);
+ }
+ free_test_req(&req);
+
+ lba = 0;
+ TAILQ_INIT(&head_io);
+ for (count = 0; count < g_max_qd; count++) {
+ bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+ TAILQ_INSERT_TAIL(&head_io, bdev_io, module_link);
+ io_len = (rand() % g_strip_size) + 1;
+ bdev_io_initialize(bdev_io, &pbdev->bdev, lba, io_len, SPDK_BDEV_IO_TYPE_WRITE);
+ g_bdev_io_submit_status = -ENOMEM;
+ lba += g_strip_size;
+ raid_bdev_submit_request(ch, bdev_io);
+ }
+
+ g_ignore_io_output = 1;
+
+ count = get_num_elts_in_waitq();
+ CU_ASSERT(count == g_max_qd);
+ g_bdev_io_submit_status = 0;
+ process_io_waitq();
+ CU_ASSERT(TAILQ_EMPTY(&g_io_waitq));
+
+ TAILQ_FOREACH_SAFE(bdev_io, &head_io, module_link, bdev_io_next) {
+ bdev_io_cleanup(bdev_io);
+ free(bdev_io);
+ }
+
+ raid_bdev_destroy_cb(pbdev, ch_ctx);
+ CU_ASSERT(ch_ctx->base_channel == NULL);
+ g_ignore_io_output = 0;
+ free(ch);
+ destroy_req.name = strdup("raid1");
+ rpc_req = &destroy_req;
+ rpc_req_size = sizeof(destroy_req);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 0;
+ spdk_rpc_destroy_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ raid_bdev_exit();
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+/* Create multiple raids, destroy raids without IO, get_raids related tests */
+static void
+test_multi_raid_no_io(void)
+{
+ struct rpc_construct_raid_bdev *construct_req;
+ struct rpc_destroy_raid_bdev destroy_req;
+ struct rpc_get_raid_bdevs get_raids_req;
+ uint32_t i;
+ char name[16];
+ uint32_t count;
+ uint32_t bbdev_idx = 0;
+
+ set_globals();
+ construct_req = calloc(MAX_RAIDS, sizeof(struct rpc_construct_raid_bdev));
+ SPDK_CU_ASSERT_FATAL(construct_req != NULL);
+ CU_ASSERT(raid_bdev_init() == 0);
+ for (i = 0; i < g_max_raids; i++) {
+ count = snprintf(name, 16, "%s%u", "raid", i);
+ name[count] = '\0';
+ create_test_req(&construct_req[i], name, bbdev_idx, true);
+ verify_raid_config_present(name, false);
+ verify_raid_bdev_present(name, false);
+ bbdev_idx += g_max_base_drives;
+ rpc_req = &construct_req[i];
+ rpc_req_size = sizeof(construct_req[0]);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 1;
+ spdk_rpc_construct_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config(&construct_req[i], true);
+ verify_raid_bdev(&construct_req[i], true, RAID_BDEV_STATE_ONLINE);
+ }
+
+ get_raids_req.category = strdup("all");
+ rpc_req = &get_raids_req;
+ rpc_req_size = sizeof(get_raids_req);
+ g_rpc_err = 0;
+ g_test_multi_raids = 1;
+ g_json_decode_obj_construct = 0;
+ spdk_rpc_get_raid_bdevs(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_get_raids(construct_req, g_max_raids, g_get_raids_output, g_get_raids_count);
+ for (i = 0; i < g_get_raids_count; i++) {
+ free(g_get_raids_output[i]);
+ }
+ g_get_raids_count = 0;
+
+ get_raids_req.category = strdup("online");
+ rpc_req = &get_raids_req;
+ rpc_req_size = sizeof(get_raids_req);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 0;
+ spdk_rpc_get_raid_bdevs(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_get_raids(construct_req, g_max_raids, g_get_raids_output, g_get_raids_count);
+ for (i = 0; i < g_get_raids_count; i++) {
+ free(g_get_raids_output[i]);
+ }
+ g_get_raids_count = 0;
+
+ get_raids_req.category = strdup("configuring");
+ rpc_req = &get_raids_req;
+ rpc_req_size = sizeof(get_raids_req);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 0;
+ spdk_rpc_get_raid_bdevs(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ CU_ASSERT(g_get_raids_count == 0);
+
+ get_raids_req.category = strdup("offline");
+ rpc_req = &get_raids_req;
+ rpc_req_size = sizeof(get_raids_req);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 0;
+ spdk_rpc_get_raid_bdevs(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ CU_ASSERT(g_get_raids_count == 0);
+
+ get_raids_req.category = strdup("invalid_category");
+ rpc_req = &get_raids_req;
+ rpc_req_size = sizeof(get_raids_req);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 0;
+ spdk_rpc_get_raid_bdevs(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 1);
+ CU_ASSERT(g_get_raids_count == 0);
+
+ get_raids_req.category = strdup("all");
+ rpc_req = &get_raids_req;
+ rpc_req_size = sizeof(get_raids_req);
+ g_rpc_err = 0;
+ g_json_decode_obj_err = 1;
+ g_json_decode_obj_construct = 0;
+ spdk_rpc_get_raid_bdevs(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 1);
+ g_json_decode_obj_err = 0;
+ free(get_raids_req.category);
+ CU_ASSERT(g_get_raids_count == 0);
+
+ get_raids_req.category = strdup("all");
+ rpc_req = &get_raids_req;
+ rpc_req_size = sizeof(get_raids_req);
+ g_rpc_err = 0;
+ g_json_beg_res_ret_err = 1;
+ g_json_decode_obj_construct = 0;
+ spdk_rpc_get_raid_bdevs(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ g_json_beg_res_ret_err = 0;
+ CU_ASSERT(g_get_raids_count == 0);
+
+ for (i = 0; i < g_max_raids; i++) {
+ SPDK_CU_ASSERT_FATAL(construct_req[i].name != NULL);
+ destroy_req.name = strdup(construct_req[i].name);
+ count = snprintf(name, 16, "%s", destroy_req.name);
+ name[count] = '\0';
+ rpc_req = &destroy_req;
+ rpc_req_size = sizeof(destroy_req);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 0;
+ spdk_rpc_destroy_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config_present(name, false);
+ verify_raid_bdev_present(name, false);
+ }
+ g_test_multi_raids = 0;
+ raid_bdev_exit();
+ for (i = 0; i < g_max_raids; i++) {
+ free_test_req(&construct_req[i]);
+ }
+ free(construct_req);
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+/* Create multiple raids, fire IOs randomly on various raids */
+static void
+test_multi_raid_with_io(void)
+{
+ struct rpc_construct_raid_bdev *construct_req;
+ struct rpc_destroy_raid_bdev destroy_req;
+ uint32_t i, j;
+ char name[16];
+ uint32_t count;
+ uint32_t bbdev_idx = 0;
+ struct raid_bdev *pbdev;
+ struct spdk_io_channel *ch;
+ struct raid_bdev_io_channel *ch_ctx;
+ struct spdk_bdev_io *bdev_io;
+ uint64_t io_len;
+ uint64_t lba;
+ struct spdk_io_channel *ch_random;
+ struct raid_bdev_io_channel *ch_ctx_random;
+ int16_t iotype;
+ uint32_t raid_random;
+
+ set_globals();
+ construct_req = calloc(g_max_raids, sizeof(struct rpc_construct_raid_bdev));
+ SPDK_CU_ASSERT_FATAL(construct_req != NULL);
+ CU_ASSERT(raid_bdev_init() == 0);
+ ch = calloc(g_max_raids, sizeof(struct spdk_io_channel) + sizeof(struct raid_bdev_io_channel));
+ SPDK_CU_ASSERT_FATAL(ch != NULL);
+ for (i = 0; i < g_max_raids; i++) {
+ count = snprintf(name, 16, "%s%u", "raid", i);
+ name[count] = '\0';
+ create_test_req(&construct_req[i], name, bbdev_idx, true);
+ verify_raid_config_present(name, false);
+ verify_raid_bdev_present(name, false);
+ bbdev_idx += g_max_base_drives;
+ rpc_req = &construct_req[i];
+ rpc_req_size = sizeof(construct_req[0]);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 1;
+ spdk_rpc_construct_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config(&construct_req[i], true);
+ verify_raid_bdev(&construct_req[i], true, RAID_BDEV_STATE_ONLINE);
+ TAILQ_FOREACH(pbdev, &g_spdk_raid_bdev_list, global_link) {
+ if (strcmp(pbdev->bdev.name, construct_req[i].name) == 0) {
+ break;
+ }
+ }
+ CU_ASSERT(pbdev != NULL);
+ ch_ctx = spdk_io_channel_get_ctx(&ch[i]);
+ SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
+ CU_ASSERT(raid_bdev_create_cb(pbdev, ch_ctx) == 0);
+ CU_ASSERT(ch_ctx->base_channel != NULL);
+ for (j = 0; j < construct_req[i].base_bdevs.num_base_bdevs; j++) {
+ CU_ASSERT(ch_ctx->base_channel[j] == (void *)0x1);
+ }
+ }
+
+ lba = 0;
+ for (count = 0; count < g_max_qd; count++) {
+ bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+ io_len = (rand() % g_strip_size) + 1;
+ iotype = (rand() % 2) ? SPDK_BDEV_IO_TYPE_WRITE : SPDK_BDEV_IO_TYPE_READ;
+ memset(g_io_output, 0, (g_max_io_size / g_strip_size) + 1 * sizeof(struct io_output));
+ g_io_output_index = 0;
+ raid_random = rand() % g_max_raids;
+ ch_random = &ch[raid_random];
+ ch_ctx_random = spdk_io_channel_get_ctx(ch_random);
+ TAILQ_FOREACH(pbdev, &g_spdk_raid_bdev_list, global_link) {
+ if (strcmp(pbdev->bdev.name, construct_req[raid_random].name) == 0) {
+ break;
+ }
+ }
+ bdev_io_initialize(bdev_io, &pbdev->bdev, lba, io_len, iotype);
+ lba += g_strip_size;
+ CU_ASSERT(pbdev != NULL);
+ raid_bdev_submit_request(ch_random, bdev_io);
+ verify_io(bdev_io, g_max_base_drives, ch_ctx_random, pbdev,
+ g_child_io_status_flag);
+ bdev_io_cleanup(bdev_io);
+ free(bdev_io);
+ }
+
+ for (i = 0; i < g_max_raids; i++) {
+ TAILQ_FOREACH(pbdev, &g_spdk_raid_bdev_list, global_link) {
+ if (strcmp(pbdev->bdev.name, construct_req[i].name) == 0) {
+ break;
+ }
+ }
+ CU_ASSERT(pbdev != NULL);
+ ch_ctx = spdk_io_channel_get_ctx(&ch[i]);
+ SPDK_CU_ASSERT_FATAL(ch_ctx != NULL);
+ raid_bdev_destroy_cb(pbdev, ch_ctx);
+ CU_ASSERT(ch_ctx->base_channel == NULL);
+ destroy_req.name = strdup(construct_req[i].name);
+ count = snprintf(name, 16, "%s", destroy_req.name);
+ name[count] = '\0';
+ rpc_req = &destroy_req;
+ rpc_req_size = sizeof(destroy_req);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 0;
+ spdk_rpc_destroy_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config_present(name, false);
+ verify_raid_bdev_present(name, false);
+ }
+ raid_bdev_exit();
+ for (i = 0; i < g_max_raids; i++) {
+ free_test_req(&construct_req[i]);
+ }
+ free(construct_req);
+ free(ch);
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+static void
+test_io_type_supported(void)
+{
+ CU_ASSERT(raid_bdev_io_type_supported(NULL, SPDK_BDEV_IO_TYPE_READ) == true);
+ CU_ASSERT(raid_bdev_io_type_supported(NULL, SPDK_BDEV_IO_TYPE_WRITE) == true);
+ CU_ASSERT(raid_bdev_io_type_supported(NULL, SPDK_BDEV_IO_TYPE_FLUSH) == true);
+ CU_ASSERT(raid_bdev_io_type_supported(NULL, SPDK_BDEV_IO_TYPE_INVALID) == false);
+}
+
+static void
+test_create_raid_from_config(void)
+{
+ struct rpc_construct_raid_bdev req;
+ struct spdk_bdev *bdev;
+ struct rpc_destroy_raid_bdev destroy_req;
+ bool can_claim;
+ struct raid_bdev_config *raid_cfg;
+ uint32_t base_bdev_slot;
+
+ set_globals();
+ create_test_req(&req, "raid1", 0, true);
+ rpc_req = &req;
+ rpc_req_size = sizeof(req);
+ g_config_level_create = 1;
+ CU_ASSERT(raid_bdev_init() == 0);
+ g_config_level_create = 0;
+
+ verify_raid_config_present("raid1", true);
+ verify_raid_bdev_present("raid1", true);
+
+ TAILQ_FOREACH(bdev, &g_bdev_list, internal.link) {
+ raid_bdev_examine(bdev);
+ }
+
+ can_claim = raid_bdev_can_claim_bdev("Invalid", &raid_cfg, &base_bdev_slot);
+ CU_ASSERT(can_claim == false);
+
+ verify_raid_config(&req, true);
+ verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
+
+ destroy_req.name = strdup("raid1");
+ rpc_req = &destroy_req;
+ rpc_req_size = sizeof(destroy_req);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 0;
+ spdk_rpc_destroy_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ raid_bdev_exit();
+ free_test_req(&req);
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+static void
+test_create_raid_from_config_invalid_params(void)
+{
+ struct rpc_construct_raid_bdev req;
+ uint8_t count;
+
+ set_globals();
+ rpc_req = &req;
+ rpc_req_size = sizeof(req);
+ g_config_level_create = 1;
+
+ create_test_req(&req, "raid1", 0, true);
+ free(req.name);
+ req.name = NULL;
+ CU_ASSERT(raid_bdev_init() != 0);
+ free_test_req(&req);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ create_test_req(&req, "raid1", 0, false);
+ req.strip_size = 1234;
+ CU_ASSERT(raid_bdev_init() != 0);
+ free_test_req(&req);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ create_test_req(&req, "raid1", 0, false);
+ req.raid_level = 1;
+ CU_ASSERT(raid_bdev_init() != 0);
+ free_test_req(&req);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ create_test_req(&req, "raid1", 0, false);
+ req.raid_level = 1;
+ CU_ASSERT(raid_bdev_init() != 0);
+ free_test_req(&req);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ create_test_req(&req, "raid1", 0, false);
+ req.base_bdevs.num_base_bdevs++;
+ CU_ASSERT(raid_bdev_init() != 0);
+ req.base_bdevs.num_base_bdevs--;
+ free_test_req(&req);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ create_test_req(&req, "raid1", 0, false);
+ req.base_bdevs.num_base_bdevs--;
+ CU_ASSERT(raid_bdev_init() != 0);
+ req.base_bdevs.num_base_bdevs++;
+ free_test_req(&req);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ if (g_max_base_drives > 1) {
+ create_test_req(&req, "raid1", 0, false);
+ count = snprintf(req.base_bdevs.base_bdevs[g_max_base_drives - 1], 15, "%s", "Nvme0n1");
+ req.base_bdevs.base_bdevs[g_max_base_drives - 1][count] = '\0';
+ CU_ASSERT(raid_bdev_init() != 0);
+ free_test_req(&req);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+ }
+
+ raid_bdev_exit();
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+static void
+test_raid_json_dump_info(void)
+{
+ struct rpc_construct_raid_bdev req;
+ struct rpc_destroy_raid_bdev destroy_req;
+ struct raid_bdev *pbdev;
+
+ set_globals();
+ create_test_req(&req, "raid1", 0, true);
+ rpc_req = &req;
+ rpc_req_size = sizeof(req);
+ CU_ASSERT(raid_bdev_init() == 0);
+
+ verify_raid_config_present(req.name, false);
+ verify_raid_bdev_present(req.name, false);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 1;
+ spdk_rpc_construct_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_bdev(&req, true, RAID_BDEV_STATE_ONLINE);
+
+ TAILQ_FOREACH(pbdev, &g_spdk_raid_bdev_list, global_link) {
+ if (strcmp(pbdev->bdev.name, req.name) == 0) {
+ break;
+ }
+ }
+ CU_ASSERT(pbdev != NULL);
+
+ CU_ASSERT(raid_bdev_dump_info_json(pbdev, NULL) == 0);
+
+ free_test_req(&req);
+
+ destroy_req.name = strdup("raid1");
+ rpc_req = &destroy_req;
+ rpc_req_size = sizeof(destroy_req);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 0;
+ spdk_rpc_destroy_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ raid_bdev_exit();
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+static void
+test_context_size(void)
+{
+ CU_ASSERT(raid_bdev_get_ctx_size() == sizeof(struct raid_bdev_io));
+}
+
+static void
+test_asym_base_drives_blockcnt(void)
+{
+ struct rpc_construct_raid_bdev construct_req;
+ struct rpc_destroy_raid_bdev destroy_req;
+ struct spdk_bdev *bbdev;
+ uint32_t i;
+
+ set_globals();
+ create_test_req(&construct_req, "raid1", 0, true);
+ rpc_req = &construct_req;
+ rpc_req_size = sizeof(construct_req);
+ CU_ASSERT(raid_bdev_init() == 0);
+ verify_raid_config_present(construct_req.name, false);
+ verify_raid_bdev_present(construct_req.name, false);
+ g_rpc_err = 0;
+ for (i = 0; i < construct_req.base_bdevs.num_base_bdevs; i++) {
+ bbdev = spdk_bdev_get_by_name(construct_req.base_bdevs.base_bdevs[i]);
+ SPDK_CU_ASSERT_FATAL(bbdev != NULL);
+ bbdev->blockcnt = rand() + 1;
+ }
+ g_json_decode_obj_construct = 1;
+ spdk_rpc_construct_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config(&construct_req, true);
+ verify_raid_bdev(&construct_req, true, RAID_BDEV_STATE_ONLINE);
+ free_test_req(&construct_req);
+
+ destroy_req.name = strdup("raid1");
+ rpc_req = &destroy_req;
+ rpc_req_size = sizeof(destroy_req);
+ g_rpc_err = 0;
+ g_json_decode_obj_construct = 0;
+ spdk_rpc_destroy_raid_bdev(NULL, NULL);
+ CU_ASSERT(g_rpc_err == 0);
+ verify_raid_config_present("raid1", false);
+ verify_raid_bdev_present("raid1", false);
+
+ raid_bdev_exit();
+ base_bdevs_cleanup();
+ reset_globals();
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("raid", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "test_construct_raid", test_construct_raid) == NULL ||
+ CU_add_test(suite, "test_destroy_raid", test_destroy_raid) == NULL ||
+ CU_add_test(suite, "test_construct_raid_invalid_args", test_construct_raid_invalid_args) == NULL ||
+ CU_add_test(suite, "test_destroy_raid_invalid_args", test_destroy_raid_invalid_args) == NULL ||
+ CU_add_test(suite, "test_io_channel", test_io_channel) == NULL ||
+ CU_add_test(suite, "test_write_io", test_write_io) == NULL ||
+ CU_add_test(suite, "test_read_io", test_read_io) == NULL ||
+ CU_add_test(suite, "test_io_failure", test_io_failure) == NULL ||
+ CU_add_test(suite, "test_io_waitq", test_io_waitq) == NULL ||
+ CU_add_test(suite, "test_multi_raid_no_io", test_multi_raid_no_io) == NULL ||
+ CU_add_test(suite, "test_multi_raid_with_io", test_multi_raid_with_io) == NULL ||
+ CU_add_test(suite, "test_io_type_supported", test_io_type_supported) == NULL ||
+ CU_add_test(suite, "test_create_raid_from_config", test_create_raid_from_config) == NULL ||
+ CU_add_test(suite, "test_create_raid_from_config_invalid_params",
+ test_create_raid_from_config_invalid_params) == NULL ||
+ CU_add_test(suite, "test_raid_json_dump_info", test_raid_json_dump_info) == NULL ||
+ CU_add_test(suite, "test_context_size", test_context_size) == NULL ||
+ CU_add_test(suite, "test_asym_base_drives_blockcnt", test_asym_base_drives_blockcnt) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ set_test_opts();
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/bdev/crypto.c/.gitignore b/src/spdk/test/unit/lib/bdev/crypto.c/.gitignore
new file mode 100644
index 00000000..b2777562
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/crypto.c/.gitignore
@@ -0,0 +1 @@
+crypto_ut
diff --git a/src/spdk/test/unit/lib/bdev/crypto.c/Makefile b/src/spdk/test/unit/lib/bdev/crypto.c/Makefile
new file mode 100644
index 00000000..3241464b
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/crypto.c/Makefile
@@ -0,0 +1,41 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+
+TEST_FILE = crypto_ut.c
+CFLAGS += $(ENV_CFLAGS)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/bdev/crypto.c/crypto_ut.c b/src/spdk/test/unit/lib/bdev/crypto.c/crypto_ut.c
new file mode 100644
index 00000000..f01aba19
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/crypto.c/crypto_ut.c
@@ -0,0 +1,908 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "common/lib/test_env.c"
+#include "spdk_internal/mock.h"
+#include "unit/lib/json_mock.c"
+
+/* these rte_ headers are our local copies of the DPDK headers hacked to mock some functions
+ * included in them that can't be done with our mock library.
+ */
+#include "rte_crypto.h"
+#include "rte_cryptodev.h"
+DEFINE_STUB_V(rte_crypto_op_free, (struct rte_crypto_op *op));
+#include "bdev/crypto/vbdev_crypto.c"
+
+/* SPDK stubs */
+DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *,
+ (struct spdk_conf *cp, const char *name), NULL);
+DEFINE_STUB(spdk_conf_section_get_nval, char *,
+ (struct spdk_conf_section *sp, const char *key, int idx), NULL);
+DEFINE_STUB(spdk_conf_section_get_nmval, char *,
+ (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL);
+
+DEFINE_STUB_V(spdk_bdev_module_list_add, (struct spdk_bdev_module *bdev_module));
+DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *g_bdev_io));
+DEFINE_STUB(spdk_bdev_io_type_supported, bool, (struct spdk_bdev *bdev,
+ enum spdk_bdev_io_type io_type), 0);
+DEFINE_STUB_V(spdk_bdev_module_release_bdev, (struct spdk_bdev *bdev));
+DEFINE_STUB_V(spdk_bdev_close, (struct spdk_bdev_desc *desc));
+DEFINE_STUB(spdk_bdev_get_name, const char *, (const struct spdk_bdev *bdev), 0);
+DEFINE_STUB(spdk_env_get_current_core, uint32_t, (void), 0);
+DEFINE_STUB(spdk_bdev_get_io_channel, struct spdk_io_channel *, (struct spdk_bdev_desc *desc), 0);
+DEFINE_STUB_V(spdk_bdev_unregister, (struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn,
+ void *cb_arg));
+DEFINE_STUB(spdk_bdev_open, int, (struct spdk_bdev *bdev, bool write,
+ spdk_bdev_remove_cb_t remove_cb,
+ void *remove_ctx, struct spdk_bdev_desc **_desc), 0);
+DEFINE_STUB(spdk_bdev_module_claim_bdev, int, (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
+ struct spdk_bdev_module *module), 0);
+DEFINE_STUB_V(spdk_bdev_module_examine_done, (struct spdk_bdev_module *module));
+DEFINE_STUB(spdk_vbdev_register, int, (struct spdk_bdev *vbdev, struct spdk_bdev **base_bdevs,
+ int base_bdev_count), 0);
+DEFINE_STUB(spdk_bdev_get_by_name, struct spdk_bdev *, (const char *bdev_name), NULL);
+DEFINE_STUB(spdk_env_get_socket_id, uint32_t, (uint32_t core), 0);
+
+/* DPDK stubs */
+DEFINE_STUB(rte_cryptodev_count, uint8_t, (void), 0);
+DEFINE_STUB(rte_eal_get_configuration, struct rte_config *, (void), NULL);
+DEFINE_STUB_V(rte_mempool_free, (struct rte_mempool *mp));
+DEFINE_STUB(rte_socket_id, unsigned, (void), 0);
+DEFINE_STUB(rte_crypto_op_pool_create, struct rte_mempool *,
+ (const char *name, enum rte_crypto_op_type type, unsigned nb_elts,
+ unsigned cache_size, uint16_t priv_size, int socket_id), (struct rte_mempool *)1);
+DEFINE_STUB(rte_cryptodev_device_count_by_driver, uint8_t, (uint8_t driver_id), 0);
+DEFINE_STUB(rte_cryptodev_socket_id, int, (uint8_t dev_id), 0);
+DEFINE_STUB(rte_cryptodev_configure, int, (uint8_t dev_id, struct rte_cryptodev_config *config), 0);
+DEFINE_STUB(rte_cryptodev_queue_pair_setup, int, (uint8_t dev_id, uint16_t queue_pair_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id, struct rte_mempool *session_pool), 0);
+DEFINE_STUB(rte_cryptodev_start, int, (uint8_t dev_id), 0)
+DEFINE_STUB_V(rte_cryptodev_stop, (uint8_t dev_id));
+DEFINE_STUB(rte_cryptodev_sym_session_create, struct rte_cryptodev_sym_session *,
+ (struct rte_mempool *mempool), (struct rte_cryptodev_sym_session *)1);
+DEFINE_STUB(rte_cryptodev_sym_session_clear, int, (uint8_t dev_id,
+ struct rte_cryptodev_sym_session *sess), 0);
+DEFINE_STUB(rte_cryptodev_sym_session_free, int, (struct rte_cryptodev_sym_session *sess), 0);
+DEFINE_STUB(rte_cryptodev_sym_session_init, int, (uint8_t dev_id,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_crypto_sym_xform *xforms, struct rte_mempool *mempool), 0);
+DEFINE_STUB(rte_vdev_init, int, (const char *name, const char *args), 0);
+void __attribute__((noreturn)) __rte_panic(const char *funcname, const char *format, ...)
+{
+ abort();
+}
+struct rte_mempool_ops_table rte_mempool_ops_table;
+struct rte_cryptodev *rte_cryptodevs;
+__thread unsigned per_lcore__lcore_id = 0;
+
+/* global vars and setup/cleanup functions used for all test functions */
+struct spdk_bdev_io *g_bdev_io;
+struct crypto_bdev_io *g_io_ctx;
+struct crypto_io_channel *g_crypto_ch;
+struct spdk_io_channel *g_io_ch;
+struct vbdev_dev g_device;
+struct vbdev_crypto g_crypto_bdev;
+struct rte_config *g_test_config;
+struct device_qp g_dev_qp;
+
+#define MAX_TEST_BLOCKS 8192
+struct rte_crypto_op *g_test_crypto_ops[MAX_TEST_BLOCKS];
+struct rte_crypto_op *g_test_dequeued_ops[MAX_TEST_BLOCKS];
+struct rte_crypto_op *g_test_dev_full_ops[MAX_TEST_BLOCKS];
+
+/* These globals are externs in our local rte_ header files so we can control
+ * specific functions for mocking.
+ */
+uint16_t g_dequeue_mock;
+uint16_t g_enqueue_mock;
+unsigned ut_rte_crypto_op_bulk_alloc;
+int ut_rte_crypto_op_attach_sym_session = 0;
+
+int ut_rte_cryptodev_info_get = 0;
+bool ut_rte_cryptodev_info_get_mocked = false;
+void
+rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
+{
+ dev_info->max_nb_queue_pairs = ut_rte_cryptodev_info_get;
+}
+
+unsigned int
+rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
+{
+ return (unsigned int)dev_id;
+}
+
+void
+spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
+{
+ cb(g_io_ch, g_bdev_io);
+}
+
+/* Mock these functions to call the callback and then return the value we require */
+int ut_spdk_bdev_readv_blocks = 0;
+bool ut_spdk_bdev_readv_blocks_mocked = false;
+int
+spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *iov, int iovcnt,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ cb(g_bdev_io, !ut_spdk_bdev_readv_blocks, cb_arg);
+ return ut_spdk_bdev_readv_blocks;
+}
+
+int ut_spdk_bdev_writev_blocks = 0;
+bool ut_spdk_bdev_writev_blocks_mocked = false;
+int
+spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *iov, int iovcnt,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ cb(g_bdev_io, !ut_spdk_bdev_writev_blocks, cb_arg);
+ return ut_spdk_bdev_writev_blocks;
+}
+
+int ut_spdk_bdev_unmap_blocks = 0;
+bool ut_spdk_bdev_unmap_blocks_mocked = false;
+int
+spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ cb(g_bdev_io, !ut_spdk_bdev_unmap_blocks, cb_arg);
+ return ut_spdk_bdev_unmap_blocks;
+}
+
+int ut_spdk_bdev_flush_blocks = 0;
+bool ut_spdk_bdev_flush_blocks_mocked = false;
+int
+spdk_bdev_flush_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ uint64_t offset_blocks, uint64_t num_blocks, spdk_bdev_io_completion_cb cb,
+ void *cb_arg)
+{
+ cb(g_bdev_io, !ut_spdk_bdev_flush_blocks, cb_arg);
+ return ut_spdk_bdev_flush_blocks;
+}
+
+int ut_spdk_bdev_reset = 0;
+bool ut_spdk_bdev_reset_mocked = false;
+int
+spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ cb(g_bdev_io, !ut_spdk_bdev_reset, cb_arg);
+ return ut_spdk_bdev_reset;
+}
+
+bool g_completion_called = false;
+void
+spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
+{
+ bdev_io->internal.status = status;
+ g_completion_called = true;
+}
+
+/* Used in testing device full condition */
+static inline uint16_t
+rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ int i;
+
+ CU_ASSERT(nb_ops > 0);
+
+ for (i = 0; i < nb_ops; i++) {
+ /* Use this empty (til now) array of pointers to store
+ * enqueued operations for assertion in dev_full test.
+ */
+ g_test_dev_full_ops[i] = *ops++;
+ }
+
+ return g_enqueue_mock;
+}
+
+/* This is pretty ugly but in order to complete an IO via the
+ * poller in the submit path, we need to first call to this func
+ * to return the dequeued value and also decrement it. On the subsequent
+ * call it needs to return 0 to indicate to the caller that there are
+ * no more IOs to drain.
+ */
+int g_test_overflow = 0;
+static inline uint16_t
+rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ CU_ASSERT(nb_ops > 0);
+
+ /* A crypto device can be full on enqueue, the driver is designed to drain
+ * the device at the time by calling the poller until it's empty, then
+ * submitting the remaining crypto ops.
+ */
+ if (g_test_overflow) {
+ if (g_dequeue_mock == 0) {
+ return 0;
+ }
+ *ops = g_test_crypto_ops[g_enqueue_mock];
+ (*ops)->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ g_dequeue_mock -= 1;
+ }
+ return (g_dequeue_mock + 1);
+}
+
+/* Instead of allocating real memory, assign the allocations to our
+ * test array for assertion in tests.
+ */
+static inline unsigned
+rte_crypto_op_bulk_alloc(struct rte_mempool *mempool,
+ enum rte_crypto_op_type type,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ int i;
+
+ for (i = 0; i < nb_ops; i++) {
+ *ops++ = g_test_crypto_ops[i];
+ }
+ return ut_rte_crypto_op_bulk_alloc;
+}
+
+static __rte_always_inline void
+rte_mempool_put_bulk(struct rte_mempool *mp, void *const *obj_table,
+ unsigned int n)
+{
+ return;
+}
+
+static inline void *rte_mempool_get_priv(struct rte_mempool *mp)
+{
+ return NULL;
+}
+
+
+static inline int
+rte_crypto_op_attach_sym_session(struct rte_crypto_op *op,
+ struct rte_cryptodev_sym_session *sess)
+{
+ return ut_rte_crypto_op_attach_sym_session;
+}
+
+/* Global setup for all tests that share a bunch of preparation... */
+static int
+test_setup(void)
+{
+ int i;
+
+ /* Prepare essential variables for test routines */
+ g_bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct crypto_bdev_io));
+ g_bdev_io->u.bdev.iovs = calloc(1, sizeof(struct iovec) * 128);
+ g_bdev_io->bdev = &g_crypto_bdev.crypto_bdev;
+ g_io_ch = calloc(1, sizeof(struct spdk_io_channel) + sizeof(struct crypto_io_channel));
+ g_crypto_ch = (struct crypto_io_channel *)((uint8_t *)g_io_ch + sizeof(struct spdk_io_channel));
+ g_io_ctx = (struct crypto_bdev_io *)g_bdev_io->driver_ctx;
+ memset(&g_device, 0, sizeof(struct vbdev_dev));
+ memset(&g_crypto_bdev, 0, sizeof(struct vbdev_crypto));
+ g_dev_qp.device = &g_device;
+ g_io_ctx->crypto_ch = g_crypto_ch;
+ g_io_ctx->crypto_bdev = &g_crypto_bdev;
+ g_crypto_ch->device_qp = &g_dev_qp;
+ g_test_config = calloc(1, sizeof(struct rte_config));
+ g_test_config->lcore_count = 1;
+
+ /* Allocate a real mbuf pool so we can test error paths */
+ g_mbuf_mp = spdk_mempool_create("mbuf_mp", NUM_MBUFS, sizeof(struct rte_mbuf),
+ SPDK_MEMPOOL_DEFAULT_CACHE_SIZE,
+ SPDK_ENV_SOCKET_ID_ANY);
+
+ /* Instead of allocating real rte mempools for these, it's easier and provides the
+ * same coverage just calloc them here.
+ */
+ for (i = 0; i < MAX_TEST_BLOCKS; i++) {
+ g_test_crypto_ops[i] = calloc(1, sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op));
+ g_test_dequeued_ops[i] = calloc(1, sizeof(struct rte_crypto_op) +
+ sizeof(struct rte_crypto_sym_op));
+ }
+ return 0;
+}
+
+/* Global teardown for all tests */
+static int
+test_cleanup(void)
+{
+ int i;
+
+ free(g_test_config);
+ spdk_mempool_free(g_mbuf_mp);
+ for (i = 0; i < MAX_TEST_BLOCKS; i++) {
+ free(g_test_crypto_ops[i]);
+ free(g_test_dequeued_ops[i]);
+ }
+ free(g_bdev_io->u.bdev.iovs);
+ free(g_bdev_io);
+ free(g_io_ch);
+ return 0;
+}
+
+static void
+test_error_paths(void)
+{
+ /* Single element block size write, just to test error paths
+ * in vbdev_crypto_submit_request().
+ */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->u.bdev.iovcnt = 1;
+ g_bdev_io->u.bdev.num_blocks = 1;
+ g_bdev_io->u.bdev.iovs[0].iov_len = 512;
+ g_crypto_bdev.crypto_bdev.blocklen = 512;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
+ g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;
+
+ /* test failure of spdk_mempool_get_bulk() */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ MOCK_SET(spdk_mempool_get, NULL);
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+
+ /* same thing but switch to reads to test error path in _crypto_complete_io() */
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ /* Now with the read_blocks failing */
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ MOCK_SET(spdk_bdev_readv_blocks, -1);
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ MOCK_SET(spdk_bdev_readv_blocks, 0);
+ MOCK_CLEAR(spdk_mempool_get);
+
+ /* test failure of rte_crypto_op_bulk_alloc() */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ ut_rte_crypto_op_bulk_alloc = 0;
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ ut_rte_crypto_op_bulk_alloc = 1;
+
+ /* test failure of rte_cryptodev_sym_session_create() */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ MOCK_SET(rte_cryptodev_sym_session_create, NULL);
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ MOCK_SET(rte_cryptodev_sym_session_create, (struct rte_cryptodev_sym_session *)1);
+
+ /* test failure of rte_cryptodev_sym_session_init() */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ MOCK_SET(rte_cryptodev_sym_session_init, -1);
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ MOCK_SET(rte_cryptodev_sym_session_init, 0);
+
+ /* test failure of rte_crypto_op_attach_sym_session() */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ ut_rte_crypto_op_attach_sym_session = -1;
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ ut_rte_crypto_op_attach_sym_session = 0;
+}
+
+static void
+test_simple_write(void)
+{
+ /* Single element block size write */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->u.bdev.iovcnt = 1;
+ g_bdev_io->u.bdev.num_blocks = 1;
+ g_bdev_io->u.bdev.offset_blocks = 0;
+ g_bdev_io->u.bdev.iovs[0].iov_len = 512;
+ g_bdev_io->u.bdev.iovs[0].iov_base = &test_simple_write;
+ g_crypto_bdev.crypto_bdev.blocklen = 512;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
+ g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;
+
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 1);
+ CU_ASSERT(g_io_ctx->crypto_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT);
+ CU_ASSERT(g_io_ctx->cry_iov.iov_len == 512);
+ CU_ASSERT(g_io_ctx->cry_iov.iov_base != NULL);
+ CU_ASSERT(g_io_ctx->cry_offset_blocks == 0);
+ CU_ASSERT(g_io_ctx->cry_num_blocks == 1);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == &test_simple_write);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == 512);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->userdata == g_bdev_io);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->buf_addr != NULL);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst->data_len == 512);
+
+ spdk_dma_free(g_io_ctx->cry_iov.iov_base);
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_src);
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_dst);
+}
+
+static void
+test_simple_read(void)
+{
+ /* Single element block size read */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->u.bdev.iovcnt = 1;
+ g_bdev_io->u.bdev.num_blocks = 1;
+ g_bdev_io->u.bdev.iovs[0].iov_len = 512;
+ g_bdev_io->u.bdev.iovs[0].iov_base = &test_simple_read;
+ g_crypto_bdev.crypto_bdev.blocklen = 512;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
+ g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = 1;
+
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 1);
+ CU_ASSERT(g_io_ctx->crypto_op == RTE_CRYPTO_CIPHER_OP_DECRYPT);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->buf_addr == &test_simple_read);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->data_len == 512);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->next == NULL);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.length == 512);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->cipher.data.offset == 0);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_src->userdata == g_bdev_io);
+ CU_ASSERT(g_test_crypto_ops[0]->sym->m_dst == NULL);
+
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_src);
+}
+
+static void
+test_large_rw(void)
+{
+ unsigned block_len = 512;
+ unsigned num_blocks = CRYPTO_MAX_IO / block_len;
+ unsigned io_len = block_len * num_blocks;
+ unsigned i;
+
+ /* Multi block size read, multi-element */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->u.bdev.iovcnt = 1;
+ g_bdev_io->u.bdev.num_blocks = num_blocks;
+ g_bdev_io->u.bdev.iovs[0].iov_len = io_len;
+ g_bdev_io->u.bdev.iovs[0].iov_base = &test_large_rw;
+ g_crypto_bdev.crypto_bdev.blocklen = block_len;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
+ g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;
+
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_io_ctx->cryop_cnt_remaining == (int)num_blocks);
+ CU_ASSERT(g_io_ctx->crypto_op == RTE_CRYPTO_CIPHER_OP_DECRYPT);
+
+ for (i = 0; i < num_blocks; i++) {
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_large_rw + (i * block_len));
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->userdata == g_bdev_io);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
+ }
+
+ /* Multi block size write, multi-element */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->u.bdev.iovcnt = 1;
+ g_bdev_io->u.bdev.num_blocks = num_blocks;
+ g_bdev_io->u.bdev.iovs[0].iov_len = io_len;
+ g_bdev_io->u.bdev.iovs[0].iov_base = &test_large_rw;
+ g_crypto_bdev.crypto_bdev.blocklen = block_len;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
+ g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;
+
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_io_ctx->cryop_cnt_remaining == (int)num_blocks);
+ CU_ASSERT(g_io_ctx->crypto_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT);
+
+ for (i = 0; i < num_blocks; i++) {
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_large_rw + (i * block_len));
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->userdata == g_bdev_io);
+ CU_ASSERT(g_io_ctx->cry_iov.iov_len == io_len);
+ CU_ASSERT(g_io_ctx->cry_iov.iov_base != NULL);
+ CU_ASSERT(g_io_ctx->cry_offset_blocks == 0);
+ CU_ASSERT(g_io_ctx->cry_num_blocks == num_blocks);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->buf_addr != NULL);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst->data_len == block_len);
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_dst);
+ }
+ spdk_dma_free(g_io_ctx->cry_iov.iov_base);
+}
+
+static void
+test_dev_full(void)
+{
+ unsigned block_len = 512;
+ unsigned num_blocks = 2;
+ unsigned io_len = block_len * num_blocks;
+ unsigned i;
+
+ g_test_overflow = 1;
+
+ /* Multi block size read, multi-element */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->u.bdev.iovcnt = 1;
+ g_bdev_io->u.bdev.num_blocks = num_blocks;
+ g_bdev_io->u.bdev.iovs[0].iov_len = io_len;
+ g_bdev_io->u.bdev.iovs[0].iov_base = &test_dev_full;
+ g_crypto_bdev.crypto_bdev.blocklen = block_len;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
+ g_enqueue_mock = g_dequeue_mock = 1;
+ ut_rte_crypto_op_bulk_alloc = num_blocks;
+
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ /* this test only completes one of the 2 IOs (in the drain path) */
+ CU_ASSERT(g_io_ctx->cryop_cnt_remaining == 1);
+ CU_ASSERT(g_io_ctx->crypto_op == RTE_CRYPTO_CIPHER_OP_DECRYPT);
+
+ for (i = 0; i < num_blocks; i++) {
+ /* One of the src_mbufs was freed because of the device full condition so
+ * we can't assert its value here.
+ */
+ CU_ASSERT(g_test_dev_full_ops[i]->sym->cipher.data.length == block_len);
+ CU_ASSERT(g_test_dev_full_ops[i]->sym->cipher.data.offset == 0);
+ CU_ASSERT(g_test_dev_full_ops[i]->sym->m_src == g_test_dev_full_ops[i]->sym->m_src);
+ CU_ASSERT(g_test_dev_full_ops[i]->sym->m_dst == NULL);
+ }
+
+ /* Only one of the 2 blocks in the test was freed on completion by design, so
+ * we need to free th other one here.
+ */
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[0]->sym->m_src);
+ g_test_overflow = 0;
+}
+
+static void
+test_crazy_rw(void)
+{
+ unsigned block_len = 512;
+ int num_blocks = 4;
+ int i;
+
+ /* Multi block size read, single element, strange IOV makeup */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->u.bdev.iovcnt = 3;
+ g_bdev_io->u.bdev.num_blocks = num_blocks;
+ g_bdev_io->u.bdev.iovs[0].iov_len = 512;
+ g_bdev_io->u.bdev.iovs[0].iov_base = &test_crazy_rw;
+ g_bdev_io->u.bdev.iovs[1].iov_len = 1024;
+ g_bdev_io->u.bdev.iovs[1].iov_base = &test_crazy_rw + 512;
+ g_bdev_io->u.bdev.iovs[2].iov_len = 512;
+ g_bdev_io->u.bdev.iovs[2].iov_base = &test_crazy_rw + 512 + 1024;
+
+ g_crypto_bdev.crypto_bdev.blocklen = block_len;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
+ g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;
+
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_io_ctx->cryop_cnt_remaining == num_blocks);
+ CU_ASSERT(g_io_ctx->crypto_op == RTE_CRYPTO_CIPHER_OP_DECRYPT);
+
+ for (i = 0; i < num_blocks; i++) {
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_crazy_rw + (i * block_len));
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->userdata == g_bdev_io);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src == g_test_crypto_ops[i]->sym->m_src);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == NULL);
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
+ }
+
+ /* Multi block size write, single element strange IOV makeup */
+ num_blocks = 8;
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->u.bdev.iovcnt = 4;
+ g_bdev_io->u.bdev.num_blocks = num_blocks;
+ g_bdev_io->u.bdev.iovs[0].iov_len = 2048;
+ g_bdev_io->u.bdev.iovs[0].iov_base = &test_crazy_rw;
+ g_bdev_io->u.bdev.iovs[1].iov_len = 512;
+ g_bdev_io->u.bdev.iovs[1].iov_base = &test_crazy_rw + 2048;
+ g_bdev_io->u.bdev.iovs[2].iov_len = 512;
+ g_bdev_io->u.bdev.iovs[2].iov_base = &test_crazy_rw + 2048 + 512;
+ g_bdev_io->u.bdev.iovs[3].iov_len = 1024;
+ g_bdev_io->u.bdev.iovs[3].iov_base = &test_crazy_rw + 2048 + 512 + 512;
+
+ g_crypto_bdev.crypto_bdev.blocklen = block_len;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
+ g_enqueue_mock = g_dequeue_mock = ut_rte_crypto_op_bulk_alloc = num_blocks;
+
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_io_ctx->cryop_cnt_remaining == num_blocks);
+ CU_ASSERT(g_io_ctx->crypto_op == RTE_CRYPTO_CIPHER_OP_ENCRYPT);
+
+ for (i = 0; i < num_blocks; i++) {
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->buf_addr == &test_crazy_rw + (i * block_len));
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->data_len == block_len);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->next == NULL);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.length == block_len);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->cipher.data.offset == 0);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src->userdata == g_bdev_io);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_src == g_test_crypto_ops[i]->sym->m_src);
+ CU_ASSERT(g_test_crypto_ops[i]->sym->m_dst == g_test_crypto_ops[i]->sym->m_dst);
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_src);
+ spdk_mempool_put(g_mbuf_mp, g_test_crypto_ops[i]->sym->m_dst);
+ }
+ spdk_dma_free(g_io_ctx->cry_iov.iov_base);
+}
+
+static void
+test_passthru(void)
+{
+ /* Make sure these follow our completion callback, test success & fail. */
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_UNMAP;
+ MOCK_SET(spdk_bdev_unmap_blocks, 0);
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ MOCK_SET(spdk_bdev_unmap_blocks, -1);
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ MOCK_CLEAR(spdk_bdev_unmap_blocks);
+
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_FLUSH;
+ MOCK_SET(spdk_bdev_flush_blocks, 0);
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ MOCK_SET(spdk_bdev_flush_blocks, -1);
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ MOCK_CLEAR(spdk_bdev_flush_blocks);
+
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_RESET;
+ MOCK_SET(spdk_bdev_reset, 0);
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ MOCK_SET(spdk_bdev_reset, -1);
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ MOCK_CLEAR(spdk_bdev_reset);
+
+ /* We should never get a WZ command, we report that we don't support it. */
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE_ZEROES;
+ vbdev_crypto_submit_request(g_io_ch, g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+}
+
+static void
+test_initdrivers(void)
+{
+ int rc;
+ static struct spdk_mempool *orig_mbuf_mp;
+ static struct spdk_mempool *orig_session_mp;
+
+ /* No drivers available, not an error though */
+ MOCK_SET(rte_eal_get_configuration, g_test_config);
+ MOCK_SET(rte_cryptodev_count, 0);
+ rc = vbdev_crypto_init_crypto_drivers();
+ CU_ASSERT(rc == 0);
+
+ /* Test failure of DPDK dev init. */
+ MOCK_SET(rte_cryptodev_count, 2);
+ MOCK_SET(rte_vdev_init, -1);
+ rc = vbdev_crypto_init_crypto_drivers();
+ CU_ASSERT(rc == -EINVAL);
+ MOCK_SET(rte_vdev_init, 0);
+
+ /* Can't create session pool. */
+ MOCK_SET(spdk_mempool_create, NULL);
+ orig_mbuf_mp = g_mbuf_mp;
+ orig_session_mp = g_session_mp;
+ rc = vbdev_crypto_init_crypto_drivers();
+ g_mbuf_mp = orig_mbuf_mp;
+ g_session_mp = orig_session_mp;
+ CU_ASSERT(rc == -ENOMEM);
+ MOCK_CLEAR(spdk_mempool_create);
+
+ /* Can't create op pool. These tests will alloc and free our g_mbuf_mp
+ * so save that off here and restore it after each test is over.
+ */
+ orig_mbuf_mp = g_mbuf_mp;
+ orig_session_mp = g_session_mp;
+ MOCK_SET(rte_crypto_op_pool_create, NULL);
+ rc = vbdev_crypto_init_crypto_drivers();
+ g_mbuf_mp = orig_mbuf_mp;
+ g_session_mp = orig_session_mp;
+ CU_ASSERT(rc == -ENOMEM);
+ MOCK_SET(rte_crypto_op_pool_create, (struct rte_mempool *)1);
+
+ /* Check resources are sufficient failure. */
+ orig_mbuf_mp = g_mbuf_mp;
+ orig_session_mp = g_session_mp;
+ rc = vbdev_crypto_init_crypto_drivers();
+ g_mbuf_mp = orig_mbuf_mp;
+ g_session_mp = orig_session_mp;
+ CU_ASSERT(rc == -EINVAL);
+
+ /* Test crypto dev configure failure. */
+ MOCK_SET(rte_cryptodev_device_count_by_driver, 2);
+ MOCK_SET(rte_cryptodev_info_get, 1);
+ MOCK_SET(rte_cryptodev_configure, -1);
+ orig_mbuf_mp = g_mbuf_mp;
+ orig_session_mp = g_session_mp;
+ rc = vbdev_crypto_init_crypto_drivers();
+ g_mbuf_mp = orig_mbuf_mp;
+ g_session_mp = orig_session_mp;
+ MOCK_SET(rte_cryptodev_configure, 0);
+ CU_ASSERT(rc == -EINVAL);
+
+ /* Test failure of qp setup. */
+ MOCK_SET(rte_cryptodev_queue_pair_setup, -1);
+ orig_mbuf_mp = g_mbuf_mp;
+ orig_session_mp = g_session_mp;
+ rc = vbdev_crypto_init_crypto_drivers();
+ g_mbuf_mp = orig_mbuf_mp;
+ g_session_mp = orig_session_mp;
+ CU_ASSERT(rc == -EINVAL);
+ MOCK_SET(rte_cryptodev_queue_pair_setup, 0);
+
+ /* Test failure of dev start. */
+ MOCK_SET(rte_cryptodev_start, -1);
+ orig_mbuf_mp = g_mbuf_mp;
+ orig_session_mp = g_session_mp;
+ rc = vbdev_crypto_init_crypto_drivers();
+ g_mbuf_mp = orig_mbuf_mp;
+ g_session_mp = orig_session_mp;
+ CU_ASSERT(rc == -EINVAL);
+ MOCK_SET(rte_cryptodev_start, 0);
+
+ /* Test happy path. */
+ rc = vbdev_crypto_init_crypto_drivers();
+ CU_ASSERT(rc == 0);
+}
+
+static void
+test_crypto_op_complete(void)
+{
+ /* Make sure completion code respects failure. */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_FAILED;
+ g_completion_called = false;
+ _crypto_operation_complete(g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ CU_ASSERT(g_completion_called == true);
+
+ /* Test read completion. */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_READ;
+ g_completion_called = false;
+ _crypto_operation_complete(g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_completion_called == true);
+
+ /* Test write completion success. */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
+ g_completion_called = false;
+ MOCK_SET(spdk_bdev_writev_blocks, 0);
+ /* Code under test will free this, if not ASAN will complain. */
+ g_io_ctx->cry_iov.iov_base = spdk_dma_malloc(16, 0x10, NULL);
+ _crypto_operation_complete(g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_completion_called == true);
+
+ /* Test write completion failed. */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_WRITE;
+ g_completion_called = false;
+ MOCK_SET(spdk_bdev_writev_blocks, -1);
+ /* Code under test will free this, if not ASAN will complain. */
+ g_io_ctx->cry_iov.iov_base = spdk_dma_malloc(16, 0x10, NULL);
+ _crypto_operation_complete(g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ CU_ASSERT(g_completion_called == true);
+
+ /* Test bogus type for this completion. */
+ g_bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ g_bdev_io->type = SPDK_BDEV_IO_TYPE_RESET;
+ g_completion_called = false;
+ _crypto_operation_complete(g_bdev_io);
+ CU_ASSERT(g_bdev_io->internal.status == SPDK_BDEV_IO_STATUS_FAILED);
+ CU_ASSERT(g_completion_called == true);
+}
+
+static void
+test_supported_io(void)
+{
+ void *ctx = NULL;
+ bool rc = true;
+
+ /* Make sure we always report false to WZ, we need the bdev layer to
+ * send real 0's so we can encrypt/decrypt them.
+ */
+ rc = vbdev_crypto_io_type_supported(ctx, SPDK_BDEV_IO_TYPE_WRITE_ZEROES);
+ CU_ASSERT(rc == false);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("crypto", test_setup, test_cleanup);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (CU_add_test(suite, "test_error_paths",
+ test_error_paths) == NULL ||
+ CU_add_test(suite, "test_simple_write",
+ test_simple_write) == NULL ||
+ CU_add_test(suite, "test_simple_read",
+ test_simple_read) == NULL ||
+ CU_add_test(suite, "test_large_rw",
+ test_large_rw) == NULL ||
+ CU_add_test(suite, "test_dev_full",
+ test_dev_full) == NULL ||
+ CU_add_test(suite, "test_crazy_rw",
+ test_crazy_rw) == NULL ||
+ CU_add_test(suite, "test_passthru",
+ test_passthru) == NULL ||
+ CU_add_test(suite, "test_initdrivers",
+ test_initdrivers) == NULL ||
+ CU_add_test(suite, "test_crypto_op_complete",
+ test_crypto_op_complete) == NULL ||
+ CU_add_test(suite, "test_supported_io",
+ test_supported_io) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/bdev/crypto.c/rte_crypto.h b/src/spdk/test/unit/lib/bdev/crypto.c/rte_crypto.h
new file mode 100644
index 00000000..a53a71df
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/crypto.c/rte_crypto.h
@@ -0,0 +1,95 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * Copyright(c) 2016 6WIND S.A.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUcryptoION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_CRYPTO_H_
+#define _RTE_CRYPTO_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* In order to mock some DPDK functions, we place headers here with the name name as the DPDK headers
+ * so these definitions wil be picked up. Only what's mocked is included.
+ */
+
+#include "rte_mbuf.h"
+#include "rte_mempool.h"
+#include "rte_crypto_sym.h"
+
+enum rte_crypto_op_type {
+ RTE_CRYPTO_OP_TYPE_UNDEFINED,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+};
+
+enum rte_crypto_op_status {
+ RTE_CRYPTO_OP_STATUS_SUCCESS,
+ RTE_CRYPTO_OP_STATUS_NOT_PROCESSED,
+ RTE_CRYPTO_OP_STATUS_AUTH_FAILED,
+ RTE_CRYPTO_OP_STATUS_INVALID_SESSION,
+ RTE_CRYPTO_OP_STATUS_INVALID_ARGS,
+ RTE_CRYPTO_OP_STATUS_ERROR,
+};
+
+struct rte_crypto_op {
+ uint8_t type;
+ uint8_t status;
+ uint8_t sess_type;
+ uint8_t reserved[5];
+ struct rte_mempool *mempool;
+ rte_iova_t phys_addr;
+ __extension__
+ union {
+ struct rte_crypto_sym_op sym[0];
+ };
+};
+
+extern struct rte_mempool *
+rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
+ unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
+ int socket_id);
+
+static inline unsigned
+rte_crypto_op_bulk_alloc(struct rte_mempool *mempool,
+ enum rte_crypto_op_type type,
+ struct rte_crypto_op **ops, uint16_t nb_ops);
+
+static inline int
+rte_crypto_op_attach_sym_session(struct rte_crypto_op *op,
+ struct rte_cryptodev_sym_session *sess);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/spdk/test/unit/lib/bdev/crypto.c/rte_cryptodev.h b/src/spdk/test/unit/lib/bdev/crypto.c/rte_cryptodev.h
new file mode 100644
index 00000000..b941a20d
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/crypto.c/rte_cryptodev.h
@@ -0,0 +1,153 @@
+/*-
+ *
+ * Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
+ * Copyright 2014 6WIND S.A.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_CRYPTODEV_H_
+#define _RTE_CRYPTODEV_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* In order to mock some DPDK functions, we place headers here with the name name as the DPDK headers
+ * so these definitions wil be picked up. Only what's mocked is included.
+ */
+
+uint8_t dummy[16];
+#define rte_crypto_op_ctod_offset(c, t, o) &dummy[0]
+
+#define RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER (1ULL << 9)
+
+struct rte_cryptodev_info {
+ const char *driver_name;
+ uint8_t driver_id;
+ struct rte_pci_device *pci_dev;
+ uint64_t feature_flags;
+ const struct rte_cryptodev_capabilities *capabilities;
+ unsigned max_nb_queue_pairs;
+ struct {
+ unsigned max_nb_sessions;
+ unsigned int max_nb_sessions_per_qp;
+ } sym;
+};
+
+enum rte_cryptodev_event_type {
+ RTE_CRYPTODEV_EVENT_UNKNOWN,
+ RTE_CRYPTODEV_EVENT_ERROR,
+ RTE_CRYPTODEV_EVENT_MAX
+};
+
+struct rte_cryptodev_qp_conf {
+ uint32_t nb_descriptors;
+};
+
+struct rte_cryptodev_stats {
+ uint64_t enqueued_count;
+ uint64_t dequeued_count;
+ uint64_t enqueue_err_count;
+ uint64_t dequeue_err_count;
+};
+
+#define RTE_CRYPTODEV_NAME_MAX_LEN (64)
+
+extern uint8_t
+rte_cryptodev_count(void);
+
+extern uint8_t
+rte_cryptodev_device_count_by_driver(uint8_t driver_id);
+
+extern int
+rte_cryptodev_socket_id(uint8_t dev_id);
+
+struct rte_cryptodev_config {
+ int socket_id;
+ uint16_t nb_queue_pairs;
+};
+
+extern int
+rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config);
+
+extern int
+rte_cryptodev_start(uint8_t dev_id);
+
+extern void
+rte_cryptodev_stop(uint8_t dev_id);
+
+extern int
+rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
+ const struct rte_cryptodev_qp_conf *qp_conf, int socket_id,
+ struct rte_mempool *session_pool);
+
+extern void
+rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info);
+
+static inline uint16_t
+rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
+ struct rte_crypto_op **ops, uint16_t nb_ops);
+
+static inline uint16_t
+rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
+ struct rte_crypto_op **ops, uint16_t nb_ops);
+
+struct rte_cryptodev_sym_session {
+ __extension__ void *sess_private_data[0];
+};
+
+struct rte_cryptodev_asym_session {
+ __extension__ void *sess_private_data[0];
+};
+
+struct rte_crypto_asym_xform;
+
+struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_create(struct rte_mempool *mempool);
+
+int
+rte_cryptodev_sym_session_free(struct rte_cryptodev_sym_session *sess);
+
+int
+rte_cryptodev_sym_session_init(uint8_t dev_id,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_crypto_sym_xform *xforms,
+ struct rte_mempool *mempool);
+
+int
+rte_cryptodev_sym_session_clear(uint8_t dev_id,
+ struct rte_cryptodev_sym_session *sess);
+
+unsigned int
+rte_cryptodev_sym_get_private_session_size(uint8_t dev_id);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/spdk/test/unit/lib/bdev/crypto.c/rte_mbuf.h b/src/spdk/test/unit/lib/bdev/crypto.c/rte_mbuf.h
new file mode 100644
index 00000000..4d69f482
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/crypto.c/rte_mbuf.h
@@ -0,0 +1,148 @@
+/*-
+ *
+ * Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
+ * Copyright 2014 6WIND S.A.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_MBUF_H_
+#define _RTE_MBUF_H_
+
+#include "rte_mempool.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* In order to mock some DPDK functions, we place headers here with the name name as the DPDK headers
+ * so these definitions wil be picked up. Only what's mocked is included.
+ */
+
+__extension__
+typedef void *MARKER[0];
+__extension__
+typedef uint8_t MARKER8[0];
+__extension__
+typedef uint64_t MARKER64[0];
+
+struct rte_mbuf {
+ MARKER cacheline0;
+ void *buf_addr;
+ RTE_STD_C11
+ union {
+ rte_iova_t buf_iova;
+ rte_iova_t buf_physaddr;
+ } __rte_aligned(sizeof(rte_iova_t));
+ MARKER64 rearm_data;
+ uint16_t data_off;
+ RTE_STD_C11
+ union {
+ rte_atomic16_t refcnt_atomic;
+ uint16_t refcnt;
+ };
+ uint16_t nb_segs;
+ uint16_t port;
+ uint64_t ol_flags;
+ MARKER rx_descriptor_fields1;
+ RTE_STD_C11
+ union {
+ uint32_t packet_type;
+ struct {
+ uint32_t l2_type: 4;
+ uint32_t l3_type: 4;
+ uint32_t l4_type: 4;
+ uint32_t tun_type: 4;
+ RTE_STD_C11
+ union {
+ uint8_t inner_esp_next_proto;
+ __extension__
+ struct {
+ uint8_t inner_l2_type: 4;
+ uint8_t inner_l3_type: 4;
+ };
+ };
+ uint32_t inner_l4_type: 4;
+ };
+ };
+ uint32_t pkt_len;
+ uint16_t data_len;
+ uint16_t vlan_tci;
+ union {
+ uint32_t rss;
+ struct {
+ RTE_STD_C11
+ union {
+ struct {
+ uint16_t hash;
+ uint16_t id;
+ };
+ uint32_t lo;
+ };
+ uint32_t hi;
+ } fdir;
+ struct {
+ uint32_t lo;
+ uint32_t hi;
+ } sched;
+ uint32_t usr;
+ } hash;
+ uint16_t vlan_tci_outer;
+ uint16_t buf_len;
+ uint64_t timestamp;
+ MARKER cacheline1 __rte_cache_min_aligned;
+ RTE_STD_C11
+ union {
+ void *userdata;
+ uint64_t udata64;
+ };
+ struct rte_mempool *pool;
+ struct rte_mbuf *next;
+ RTE_STD_C11
+ union {
+ uint64_t tx_offload;
+ __extension__
+ struct {
+ uint64_t l2_len: 7;
+ uint64_t l3_len: 9;
+ uint64_t l4_len: 8;
+ uint64_t tso_segsz: 16;
+ uint64_t outer_l3_len: 9;
+ uint64_t outer_l2_len: 7;
+ };
+ };
+ uint16_t priv_size;
+ uint16_t timesync;
+ uint32_t seqn;
+
+} __rte_cache_aligned;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/spdk/test/unit/lib/bdev/crypto.c/rte_mempool.h b/src/spdk/test/unit/lib/bdev/crypto.c/rte_mempool.h
new file mode 100644
index 00000000..5750d30f
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/crypto.c/rte_mempool.h
@@ -0,0 +1,145 @@
+/*-
+ *
+ * Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
+ * Copyright 2014 6WIND S.A.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_MEMPOOL_H_
+#define _RTE_MEMPOOL_H_
+
+/**
+ * @file
+ * RTE Mempool.
+ *
+ * A memory pool is an allocator of fixed-size object. It is
+ * identified by its name, and uses a ring to store free objects. It
+ * provides some other optional services, like a per-core object
+ * cache, and an alignment helper to ensure that objects are padded
+ * to spread them equally on all RAM channels, ranks, and so on.
+ *
+ * Objects owned by a mempool should never be added in another
+ * mempool. When an object is freed using rte_mempool_put() or
+ * equivalent, the object data is not modified; the user can save some
+ * meta-data in the object data and retrieve them when allocating a
+ * new object.
+ *
+ * Note: the mempool implementation is not preemptible. An lcore must not be
+ * interrupted by another task that uses the same mempool (because it uses a
+ * ring which is not preemptible). Also, usual mempool functions like
+ * rte_mempool_get() or rte_mempool_put() are designed to be called from an EAL
+ * thread due to the internal per-lcore cache. Due to the lack of caching,
+ * rte_mempool_get() or rte_mempool_put() performance will suffer when called
+ * by non-EAL threads. Instead, non-EAL threads should call
+ * rte_mempool_generic_get() or rte_mempool_generic_put() with a user cache
+ * created with rte_mempool_cache_create().
+ */
+
+#include <rte_config.h>
+#include <rte_spinlock.h>
+#include <rte_debug.h>
+#include <rte_ring.h>
+#include <rte_memcpy.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* In order to mock some DPDK functions, we place headers here with the name name as the DPDK headers
+ * so these definitions wil be picked up. Only what's mocked is included.
+ */
+
+STAILQ_HEAD(rte_mempool_objhdr_list, rte_mempool_objhdr);
+STAILQ_HEAD(rte_mempool_memhdr_list, rte_mempool_memhdr);
+struct rte_mempool {
+ char name[RTE_MEMZONE_NAMESIZE];
+ RTE_STD_C11
+ union {
+ void *pool_data;
+ uint64_t pool_id;
+ };
+ void *pool_config;
+ const struct rte_memzone *mz;
+ unsigned int flags;
+ int socket_id;
+ uint32_t size;
+ uint32_t cache_size;
+ uint32_t elt_size;
+ uint32_t header_size;
+ uint32_t trailer_size;
+ unsigned private_data_size;
+ int32_t ops_index;
+ struct rte_mempool_cache *local_cache;
+ uint32_t populated_size;
+ struct rte_mempool_objhdr_list elt_list;
+ uint32_t nb_mem_chunks;
+ struct rte_mempool_memhdr_list mem_list;
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ struct rte_mempool_debug_stats stats[RTE_MAX_LCORE];
+#endif
+} __rte_cache_aligned;
+#define RTE_MEMPOOL_OPS_NAMESIZE 32
+typedef int (*rte_mempool_alloc_t)(struct rte_mempool *mp);
+typedef void (*rte_mempool_free_t)(struct rte_mempool *mp);
+typedef int (*rte_mempool_enqueue_t)(struct rte_mempool *mp,
+ void *const *obj_table, unsigned int n);
+typedef int (*rte_mempool_dequeue_t)(struct rte_mempool *mp,
+ void **obj_table, unsigned int n);
+typedef unsigned(*rte_mempool_get_count)(const struct rte_mempool *mp);
+typedef int (*rte_mempool_get_capabilities_t)(const struct rte_mempool *mp,
+ unsigned int *flags);
+typedef int (*rte_mempool_ops_register_memory_area_t)
+(const struct rte_mempool *mp, char *vaddr, rte_iova_t iova, size_t len);
+struct rte_mempool_ops {
+ char name[RTE_MEMPOOL_OPS_NAMESIZE];
+ rte_mempool_alloc_t alloc;
+ rte_mempool_free_t free;
+ rte_mempool_enqueue_t enqueue;
+ rte_mempool_dequeue_t dequeue;
+ rte_mempool_get_count get_count;
+ rte_mempool_get_capabilities_t get_capabilities;
+ rte_mempool_ops_register_memory_area_t register_memory_area;
+} __rte_cache_aligned;
+#define RTE_MEMPOOL_MAX_OPS_IDX 16
+struct rte_mempool_ops_table {
+ rte_spinlock_t sl;
+ uint32_t num_ops;
+ struct rte_mempool_ops ops[RTE_MEMPOOL_MAX_OPS_IDX];
+} __rte_cache_aligned;
+extern struct rte_mempool_ops_table rte_mempool_ops_table;
+void
+rte_mempool_free(struct rte_mempool *mp);
+static __rte_always_inline void
+rte_mempool_put_bulk(struct rte_mempool *mp, void *const *obj_table,
+ unsigned int n);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_MEMPOOL_H_ */
diff --git a/src/spdk/test/unit/lib/bdev/gpt/Makefile b/src/spdk/test/unit/lib/bdev/gpt/Makefile
new file mode 100644
index 00000000..2fad9ba0
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/gpt/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = gpt.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/bdev/gpt/gpt.c/.gitignore b/src/spdk/test/unit/lib/bdev/gpt/gpt.c/.gitignore
new file mode 100644
index 00000000..74d476f5
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/gpt/gpt.c/.gitignore
@@ -0,0 +1 @@
+gpt_ut
diff --git a/src/spdk/test/unit/lib/bdev/gpt/gpt.c/Makefile b/src/spdk/test/unit/lib/bdev/gpt/gpt.c/Makefile
new file mode 100644
index 00000000..ad21ea2a
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/gpt/gpt.c/Makefile
@@ -0,0 +1,40 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of the copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+
+TEST_FILE = gpt_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/bdev/gpt/gpt.c/gpt_ut.c b/src/spdk/test/unit/lib/bdev/gpt/gpt.c/gpt_ut.c
new file mode 100644
index 00000000..3182f9c4
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/gpt/gpt.c/gpt_ut.c
@@ -0,0 +1,297 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "common/lib/test_env.c"
+
+#include "bdev/gpt/gpt.c"
+
+static void
+test_check_mbr(void)
+{
+ struct spdk_gpt *gpt;
+ struct spdk_mbr *mbr;
+ unsigned char a[SPDK_GPT_BUFFER_SIZE];
+ int re;
+
+ /* spdk_gpt_check_mbr(NULL) does not exist, NULL is filtered out in spdk_gpt_parse() */
+ gpt = calloc(1, sizeof(*gpt));
+ SPDK_CU_ASSERT_FATAL(gpt != NULL);
+
+ /* Set *gpt is "aaa...", all are mismatch include mbr_signature */
+ memset(a, 'a', sizeof(a));
+ gpt->buf = &a[0];
+ re = spdk_gpt_check_mbr(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set mbr->mbr_signature matched, start lba mismatch */
+ mbr = (struct spdk_mbr *)gpt->buf;
+ mbr->mbr_signature = 0xAA55;
+ re = spdk_gpt_check_mbr(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set mbr->partitions[0].start lba matched, os_type mismatch */
+ mbr->partitions[0].start_lba = 1;
+ re = spdk_gpt_check_mbr(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set mbr->partitions[0].os_type matched, size_lba mismatch */
+ mbr->partitions[0].os_type = 0xEE;
+ re = spdk_gpt_check_mbr(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set mbr->partitions[0].size_lba matched, passing case */
+ mbr->partitions[0].size_lba = 0xFFFFFFFF;
+ re = spdk_gpt_check_mbr(gpt);
+ CU_ASSERT(re == 0);
+
+ free(gpt);
+}
+
+static void
+test_read_header(void)
+{
+ struct spdk_gpt *gpt;
+ struct spdk_gpt_header *head;
+ unsigned char a[SPDK_GPT_BUFFER_SIZE];
+ int re;
+
+ /* spdk_gpt_read_header(NULL) does not exist, NULL is filtered out in spdk_gpt_parse() */
+ gpt = calloc(1, sizeof(*gpt));
+ SPDK_CU_ASSERT_FATAL(gpt != NULL);
+
+ /* Set *gpt is "aaa..." */
+ memset(a, 'a', sizeof(a));
+ gpt->buf = &a[0];
+
+ /* Set header_size mismatch */
+ gpt->sector_size = 512;
+ head = (struct spdk_gpt_header *)(gpt->buf + GPT_PRIMARY_PARTITION_TABLE_LBA * gpt->sector_size);
+ to_le32(&head->header_size, 0x258);
+ re = spdk_gpt_read_header(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set head->header_size matched, header_crc32 mismatch */
+ head->header_size = sizeof(*head);
+ to_le32(&head->header_crc32, 0x22D18C80);
+ re = spdk_gpt_read_header(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set head->header_crc32 matched, gpt_signature mismatch */
+ to_le32(&head->header_crc32, 0xC5B2117E);
+ re = spdk_gpt_read_header(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set head->gpt_signature matched, lba_end usable_lba mismatch */
+ to_le32(&head->header_crc32, 0xD637335A);
+ head->gpt_signature[0] = 'E';
+ head->gpt_signature[1] = 'F';
+ head->gpt_signature[2] = 'I';
+ head->gpt_signature[3] = ' ';
+ head->gpt_signature[4] = 'P';
+ head->gpt_signature[5] = 'A';
+ head->gpt_signature[6] = 'R';
+ head->gpt_signature[7] = 'T';
+ re = spdk_gpt_read_header(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set gpt->lba_end usable_lba matched, passing case */
+ to_le32(&head->header_crc32, 0x30CB7378);
+ to_le64(&gpt->lba_start, 0x0);
+ to_le64(&gpt->lba_end, 0x2E935FFE);
+ to_le64(&head->first_usable_lba, 0xA);
+ to_le64(&head->last_usable_lba, 0xF4240);
+ re = spdk_gpt_read_header(gpt);
+ CU_ASSERT(re == 0);
+
+ free(gpt);
+}
+
+static void
+test_read_partitions(void)
+{
+ struct spdk_gpt *gpt;
+ struct spdk_gpt_header *head;
+ unsigned char a[SPDK_GPT_BUFFER_SIZE];
+ int re;
+
+ /* spdk_gpt_read_partitions(NULL) does not exist, NULL is filtered out in spdk_gpt_parse() */
+ gpt = calloc(1, sizeof(*gpt));
+ SPDK_CU_ASSERT_FATAL(gpt != NULL);
+
+ /* Set *gpt is "aaa..." */
+ memset(a, 'a', sizeof(a));
+ gpt->buf = &a[0];
+
+ /* Set num_partition_entries exceeds Max value of entries GPT supported */
+ gpt->sector_size = 512;
+ head = (struct spdk_gpt_header *)(gpt->buf + GPT_PRIMARY_PARTITION_TABLE_LBA * gpt->sector_size);
+ gpt->header = head;
+ to_le32(&head->num_partition_entries, 0x100);
+ re = spdk_gpt_read_partitions(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set num_partition_entries within Max value, size_of_partition_entry mismatch */
+ to_le32(&head->header_crc32, 0x573857BE);
+ to_le32(&head->num_partition_entries, 0x40);
+ to_le32(&head->size_of_partition_entry, 0x0);
+ re = spdk_gpt_read_partitions(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set size_of_partition_entry matched, partition_entry_lba mismatch */
+ to_le32(&head->header_crc32, 0x5279B712);
+ to_le32(&head->size_of_partition_entry, 0x80);
+ to_le64(&head->partition_entry_lba, 0x64);
+ re = spdk_gpt_read_partitions(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set partition_entry_lba matched, partition_entry_array_crc32 mismatch */
+ to_le32(&head->header_crc32, 0xEC093B43);
+ to_le64(&head->partition_entry_lba, 0x20);
+ to_le32(&head->partition_entry_array_crc32, 0x0);
+ re = spdk_gpt_read_partitions(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set partition_entry_array_crc32 matched, passing case */
+ to_le32(&head->header_crc32, 0xE1A08822);
+ to_le32(&head->partition_entry_array_crc32, 0xEBEE44FB);
+ to_le32(&head->num_partition_entries, 0x80);
+ re = spdk_gpt_read_partitions(gpt);
+ CU_ASSERT(re == 0);
+
+ free(gpt);
+}
+
+static void
+test_parse(void)
+{
+ struct spdk_gpt *gpt;
+ struct spdk_mbr *mbr;
+ struct spdk_gpt_header *head;
+ unsigned char a[SPDK_GPT_BUFFER_SIZE];
+ int re;
+
+ /* Set gpt is NULL */
+ re = spdk_gpt_parse(NULL);
+ CU_ASSERT(re == -1);
+
+ /* Set gpt->buf is NULL */
+ gpt = calloc(1, sizeof(*gpt));
+ SPDK_CU_ASSERT_FATAL(gpt != NULL);
+ re = spdk_gpt_parse(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set *gpt is "aaa...", check_mbr failed */
+ memset(a, 'a', sizeof(a));
+ gpt->buf = &a[0];
+ re = spdk_gpt_parse(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set check_mbr passed, read_header failed */
+ mbr = (struct spdk_mbr *)gpt->buf;
+ mbr->mbr_signature = 0xAA55;
+ mbr->partitions[0].start_lba = 1;
+ mbr->partitions[0].os_type = 0xEE;
+ mbr->partitions[0].size_lba = 0xFFFFFFFF;
+ re = spdk_gpt_parse(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set read_header passed, read_partitions failed */
+ gpt->sector_size = 512;
+ head = (struct spdk_gpt_header *)(gpt->buf + GPT_PRIMARY_PARTITION_TABLE_LBA * gpt->sector_size);
+ head->header_size = sizeof(*head);
+ head->gpt_signature[0] = 'E';
+ head->gpt_signature[1] = 'F';
+ head->gpt_signature[2] = 'I';
+ head->gpt_signature[3] = ' ';
+ head->gpt_signature[4] = 'P';
+ head->gpt_signature[5] = 'A';
+ head->gpt_signature[6] = 'R';
+ head->gpt_signature[7] = 'T';
+ to_le32(&head->header_crc32, 0x30CB7378);
+ to_le64(&gpt->lba_start, 0x0);
+ to_le64(&gpt->lba_end, 0x2E935FFE);
+ to_le64(&head->first_usable_lba, 0xA);
+ to_le64(&head->last_usable_lba, 0xF4240);
+ re = spdk_gpt_parse(gpt);
+ CU_ASSERT(re == -1);
+
+ /* Set read_partitions passed, all passed */
+ to_le32(&head->size_of_partition_entry, 0x80);
+ to_le64(&head->partition_entry_lba, 0x20);
+ to_le32(&head->header_crc32, 0xE1A08822);
+ to_le32(&head->partition_entry_array_crc32, 0xEBEE44FB);
+ to_le32(&head->num_partition_entries, 0x80);
+ re = spdk_gpt_parse(gpt);
+ CU_ASSERT(re == 0);
+
+ free(gpt);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("gpt_parse", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "parse",
+ test_parse) == NULL ||
+ CU_add_test(suite, "check mbr",
+ test_check_mbr) == NULL ||
+ CU_add_test(suite, "read header",
+ test_read_header) == NULL ||
+ CU_add_test(suite, "read partitions",
+ test_read_partitions) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/bdev/mt/Makefile b/src/spdk/test/unit/lib/bdev/mt/Makefile
new file mode 100644
index 00000000..a19b345a
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/mt/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = bdev.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/bdev/mt/bdev.c/.gitignore b/src/spdk/test/unit/lib/bdev/mt/bdev.c/.gitignore
new file mode 100644
index 00000000..a5a22d0d
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/mt/bdev.c/.gitignore
@@ -0,0 +1 @@
+bdev_ut
diff --git a/src/spdk/test/unit/lib/bdev/mt/bdev.c/Makefile b/src/spdk/test/unit/lib/bdev/mt/bdev.c/Makefile
new file mode 100644
index 00000000..96b48574
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/mt/bdev.c/Makefile
@@ -0,0 +1,41 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.mock.unittest.mk
+
+TEST_FILE = bdev_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/bdev/mt/bdev.c/bdev_ut.c b/src/spdk/test/unit/lib/bdev/mt/bdev.c/bdev_ut.c
new file mode 100644
index 00000000..09740fa9
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/mt/bdev.c/bdev_ut.c
@@ -0,0 +1,1360 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "common/lib/test_env.c"
+#include "common/lib/ut_multithread.c"
+#include "unit/lib/json_mock.c"
+
+#include "spdk/config.h"
+/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
+#undef SPDK_CONFIG_VTUNE
+
+#include "bdev/bdev.c"
+
+#define BDEV_UT_NUM_THREADS 3
+
+DEFINE_STUB_V(spdk_scsi_nvme_translate, (const struct spdk_bdev_io *bdev_io,
+ int *sc, int *sk, int *asc, int *ascq));
+
+DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *, (struct spdk_conf *cp,
+ const char *name), NULL);
+DEFINE_STUB(spdk_conf_section_get_nmval, char *,
+ (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL);
+DEFINE_STUB(spdk_conf_section_get_intval, int, (struct spdk_conf_section *sp, const char *key), -1);
+
+struct spdk_trace_histories *g_trace_histories;
+DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
+DEFINE_STUB_V(spdk_trace_register_owner, (uint8_t type, char id_prefix));
+DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
+DEFINE_STUB_V(spdk_trace_register_description, (const char *name, const char *short_name,
+ uint16_t tpoint_id, uint8_t owner_type,
+ uint8_t object_type, uint8_t new_object,
+ uint8_t arg1_is_ptr, const char *arg1_name));
+DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
+ uint32_t size, uint64_t object_id, uint64_t arg1));
+
+struct ut_bdev {
+ struct spdk_bdev bdev;
+ void *io_target;
+};
+
+struct ut_bdev_channel {
+ TAILQ_HEAD(, spdk_bdev_io) outstanding_io;
+ uint32_t outstanding_cnt;
+ uint32_t avail_cnt;
+};
+
+int g_io_device;
+struct ut_bdev g_bdev;
+struct spdk_bdev_desc *g_desc;
+bool g_teardown_done = false;
+bool g_get_io_channel = true;
+bool g_create_ch = true;
+bool g_init_complete_called = false;
+bool g_fini_start_called = true;
+
+static int
+stub_create_ch(void *io_device, void *ctx_buf)
+{
+ struct ut_bdev_channel *ch = ctx_buf;
+
+ if (g_create_ch == false) {
+ return -1;
+ }
+
+ TAILQ_INIT(&ch->outstanding_io);
+ ch->outstanding_cnt = 0;
+ /*
+ * When avail gets to 0, the submit_request function will return ENOMEM.
+ * Most tests to not want ENOMEM to occur, so by default set this to a
+ * big value that won't get hit. The ENOMEM tests can then override this
+ * value to something much smaller to induce ENOMEM conditions.
+ */
+ ch->avail_cnt = 2048;
+ return 0;
+}
+
+static void
+stub_destroy_ch(void *io_device, void *ctx_buf)
+{
+}
+
+static struct spdk_io_channel *
+stub_get_io_channel(void *ctx)
+{
+ struct ut_bdev *ut_bdev = ctx;
+
+ if (g_get_io_channel == true) {
+ return spdk_get_io_channel(ut_bdev->io_target);
+ } else {
+ return NULL;
+ }
+}
+
+static int
+stub_destruct(void *ctx)
+{
+ return 0;
+}
+
+static void
+stub_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
+{
+ struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
+
+ if (bdev_io->type == SPDK_BDEV_IO_TYPE_RESET) {
+ struct spdk_bdev_io *io;
+
+ while (!TAILQ_EMPTY(&ch->outstanding_io)) {
+ io = TAILQ_FIRST(&ch->outstanding_io);
+ TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
+ ch->outstanding_cnt--;
+ spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_FAILED);
+ ch->avail_cnt++;
+ }
+ }
+
+ if (ch->avail_cnt > 0) {
+ TAILQ_INSERT_TAIL(&ch->outstanding_io, bdev_io, module_link);
+ ch->outstanding_cnt++;
+ ch->avail_cnt--;
+ } else {
+ spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
+ }
+}
+
+static uint32_t
+stub_complete_io(void *io_target, uint32_t num_to_complete)
+{
+ struct spdk_io_channel *_ch = spdk_get_io_channel(io_target);
+ struct ut_bdev_channel *ch = spdk_io_channel_get_ctx(_ch);
+ struct spdk_bdev_io *io;
+ bool complete_all = (num_to_complete == 0);
+ uint32_t num_completed = 0;
+
+ while (complete_all || num_completed < num_to_complete) {
+ if (TAILQ_EMPTY(&ch->outstanding_io)) {
+ break;
+ }
+ io = TAILQ_FIRST(&ch->outstanding_io);
+ TAILQ_REMOVE(&ch->outstanding_io, io, module_link);
+ ch->outstanding_cnt--;
+ spdk_bdev_io_complete(io, SPDK_BDEV_IO_STATUS_SUCCESS);
+ ch->avail_cnt++;
+ num_completed++;
+ }
+
+ spdk_put_io_channel(_ch);
+ return num_completed;
+}
+
+static struct spdk_bdev_fn_table fn_table = {
+ .get_io_channel = stub_get_io_channel,
+ .destruct = stub_destruct,
+ .submit_request = stub_submit_request,
+};
+
+static int
+module_init(void)
+{
+ return 0;
+}
+
+static void
+module_fini(void)
+{
+}
+
+static void
+init_complete(void)
+{
+ g_init_complete_called = true;
+}
+
+static void
+fini_start(void)
+{
+ g_fini_start_called = true;
+}
+
+struct spdk_bdev_module bdev_ut_if = {
+ .name = "bdev_ut",
+ .module_init = module_init,
+ .module_fini = module_fini,
+ .init_complete = init_complete,
+ .fini_start = fini_start,
+};
+
+SPDK_BDEV_MODULE_REGISTER(&bdev_ut_if)
+
+static void
+register_bdev(struct ut_bdev *ut_bdev, char *name, void *io_target)
+{
+ memset(ut_bdev, 0, sizeof(*ut_bdev));
+
+ ut_bdev->io_target = io_target;
+ ut_bdev->bdev.ctxt = ut_bdev;
+ ut_bdev->bdev.name = name;
+ ut_bdev->bdev.fn_table = &fn_table;
+ ut_bdev->bdev.module = &bdev_ut_if;
+ ut_bdev->bdev.blocklen = 4096;
+ ut_bdev->bdev.blockcnt = 1024;
+
+ spdk_bdev_register(&ut_bdev->bdev);
+}
+
+static void
+unregister_bdev(struct ut_bdev *ut_bdev)
+{
+ /* Handle any deferred messages. */
+ poll_threads();
+ spdk_bdev_unregister(&ut_bdev->bdev, NULL, NULL);
+}
+
+static void
+bdev_init_cb(void *done, int rc)
+{
+ CU_ASSERT(rc == 0);
+ *(bool *)done = true;
+}
+
+static void
+setup_test(void)
+{
+ bool done = false;
+
+ allocate_threads(BDEV_UT_NUM_THREADS);
+ set_thread(0);
+ spdk_bdev_initialize(bdev_init_cb, &done);
+ spdk_io_device_register(&g_io_device, stub_create_ch, stub_destroy_ch,
+ sizeof(struct ut_bdev_channel), NULL);
+ register_bdev(&g_bdev, "ut_bdev", &g_io_device);
+ spdk_bdev_open(&g_bdev.bdev, true, NULL, NULL, &g_desc);
+}
+
+static void
+finish_cb(void *cb_arg)
+{
+ g_teardown_done = true;
+}
+
+static void
+teardown_test(void)
+{
+ set_thread(0);
+ g_teardown_done = false;
+ spdk_bdev_close(g_desc);
+ g_desc = NULL;
+ unregister_bdev(&g_bdev);
+ spdk_io_device_unregister(&g_io_device, NULL);
+ spdk_bdev_finish(finish_cb, NULL);
+ poll_threads();
+ memset(&g_bdev, 0, sizeof(g_bdev));
+ CU_ASSERT(g_teardown_done == true);
+ g_teardown_done = false;
+ free_threads();
+}
+
+static uint32_t
+bdev_io_tailq_cnt(bdev_io_tailq_t *tailq)
+{
+ struct spdk_bdev_io *io;
+ uint32_t cnt = 0;
+
+ TAILQ_FOREACH(io, tailq, internal.link) {
+ cnt++;
+ }
+
+ return cnt;
+}
+
+static void
+basic(void)
+{
+ g_init_complete_called = false;
+ setup_test();
+ CU_ASSERT(g_init_complete_called == true);
+
+ set_thread(0);
+
+ g_get_io_channel = false;
+ g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
+ CU_ASSERT(g_ut_threads[0].ch == NULL);
+
+ g_get_io_channel = true;
+ g_create_ch = false;
+ g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
+ CU_ASSERT(g_ut_threads[0].ch == NULL);
+
+ g_get_io_channel = true;
+ g_create_ch = true;
+ g_ut_threads[0].ch = spdk_bdev_get_io_channel(g_desc);
+ CU_ASSERT(g_ut_threads[0].ch != NULL);
+ spdk_put_io_channel(g_ut_threads[0].ch);
+
+ g_fini_start_called = false;
+ teardown_test();
+ CU_ASSERT(g_fini_start_called == true);
+}
+
+static void
+_bdev_removed(void *done)
+{
+ *(bool *)done = true;
+}
+
+static void
+_bdev_unregistered(void *done, int rc)
+{
+ CU_ASSERT(rc == 0);
+ *(bool *)done = true;
+}
+
+static void
+unregister_and_close(void)
+{
+ bool done, remove_notify;
+ struct spdk_bdev_desc *desc;
+
+ setup_test();
+ set_thread(0);
+
+ /* setup_test() automatically opens the bdev,
+ * but this test needs to do that in a different
+ * way. */
+ spdk_bdev_close(g_desc);
+ poll_threads();
+
+ remove_notify = false;
+ spdk_bdev_open(&g_bdev.bdev, true, _bdev_removed, &remove_notify, &desc);
+ CU_ASSERT(remove_notify == false);
+ CU_ASSERT(desc != NULL);
+
+ /* There is an open descriptor on the device. Unregister it,
+ * which can't proceed until the descriptor is closed. */
+ done = false;
+ spdk_bdev_unregister(&g_bdev.bdev, _bdev_unregistered, &done);
+ /* No polling has occurred, so neither of these should execute */
+ CU_ASSERT(remove_notify == false);
+ CU_ASSERT(done == false);
+
+ /* Prior to the unregister completing, close the descriptor */
+ spdk_bdev_close(desc);
+
+ /* Poll the threads to allow all events to be processed */
+ poll_threads();
+
+ /* Remove notify should not have been called because the
+ * descriptor is already closed. */
+ CU_ASSERT(remove_notify == false);
+
+ /* The unregister should have completed */
+ CU_ASSERT(done == true);
+
+ spdk_bdev_finish(finish_cb, NULL);
+ poll_threads();
+ free_threads();
+}
+
+static void
+reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
+{
+ bool *done = cb_arg;
+
+ CU_ASSERT(success == true);
+ *done = true;
+ spdk_bdev_free_io(bdev_io);
+}
+
+static void
+put_channel_during_reset(void)
+{
+ struct spdk_io_channel *io_ch;
+ bool done = false;
+
+ setup_test();
+
+ set_thread(0);
+ io_ch = spdk_bdev_get_io_channel(g_desc);
+ CU_ASSERT(io_ch != NULL);
+
+ /*
+ * Start a reset, but then put the I/O channel before
+ * the deferred messages for the reset get a chance to
+ * execute.
+ */
+ spdk_bdev_reset(g_desc, io_ch, reset_done, &done);
+ spdk_put_io_channel(io_ch);
+ poll_threads();
+ stub_complete_io(g_bdev.io_target, 0);
+
+ teardown_test();
+}
+
+static void
+aborted_reset_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
+{
+ enum spdk_bdev_io_status *status = cb_arg;
+
+ *status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
+ spdk_bdev_free_io(bdev_io);
+}
+
+static void
+aborted_reset(void)
+{
+ struct spdk_io_channel *io_ch[2];
+ enum spdk_bdev_io_status status1 = SPDK_BDEV_IO_STATUS_PENDING,
+ status2 = SPDK_BDEV_IO_STATUS_PENDING;
+
+ setup_test();
+
+ set_thread(0);
+ io_ch[0] = spdk_bdev_get_io_channel(g_desc);
+ CU_ASSERT(io_ch[0] != NULL);
+ spdk_bdev_reset(g_desc, io_ch[0], aborted_reset_done, &status1);
+ poll_threads();
+ CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);
+
+ /*
+ * First reset has been submitted on ch0. Now submit a second
+ * reset on ch1 which will get queued since there is already a
+ * reset in progress.
+ */
+ set_thread(1);
+ io_ch[1] = spdk_bdev_get_io_channel(g_desc);
+ CU_ASSERT(io_ch[1] != NULL);
+ spdk_bdev_reset(g_desc, io_ch[1], aborted_reset_done, &status2);
+ poll_threads();
+ CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);
+
+ /*
+ * Now destroy ch1. This will abort the queued reset. Check that
+ * the second reset was completed with failed status. Also check
+ * that bdev->internal.reset_in_progress != NULL, since the
+ * original reset has not been completed yet. This ensures that
+ * the bdev code is correctly noticing that the failed reset is
+ * *not* the one that had been submitted to the bdev module.
+ */
+ set_thread(1);
+ spdk_put_io_channel(io_ch[1]);
+ poll_threads();
+ CU_ASSERT(status2 == SPDK_BDEV_IO_STATUS_FAILED);
+ CU_ASSERT(g_bdev.bdev.internal.reset_in_progress != NULL);
+
+ /*
+ * Now complete the first reset, verify that it completed with SUCCESS
+ * status and that bdev->internal.reset_in_progress is also set back to NULL.
+ */
+ set_thread(0);
+ spdk_put_io_channel(io_ch[0]);
+ stub_complete_io(g_bdev.io_target, 0);
+ poll_threads();
+ CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(g_bdev.bdev.internal.reset_in_progress == NULL);
+
+ teardown_test();
+}
+
+static void
+io_during_io_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
+{
+ enum spdk_bdev_io_status *status = cb_arg;
+
+ *status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
+ spdk_bdev_free_io(bdev_io);
+}
+
+static void
+io_during_reset(void)
+{
+ struct spdk_io_channel *io_ch[2];
+ struct spdk_bdev_channel *bdev_ch[2];
+ enum spdk_bdev_io_status status0, status1, status_reset;
+ int rc;
+
+ setup_test();
+
+ /*
+ * First test normal case - submit an I/O on each of two channels (with no resets)
+ * and verify they complete successfully.
+ */
+ set_thread(0);
+ io_ch[0] = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
+ CU_ASSERT(bdev_ch[0]->flags == 0);
+ status0 = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
+ CU_ASSERT(rc == 0);
+
+ set_thread(1);
+ io_ch[1] = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
+ CU_ASSERT(bdev_ch[1]->flags == 0);
+ status1 = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
+ CU_ASSERT(rc == 0);
+
+ poll_threads();
+ CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
+ CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
+
+ set_thread(0);
+ stub_complete_io(g_bdev.io_target, 0);
+ CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ set_thread(1);
+ stub_complete_io(g_bdev.io_target, 0);
+ CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ /*
+ * Now submit a reset, and leave it pending while we submit I/O on two different
+ * channels. These I/O should be failed by the bdev layer since the reset is in
+ * progress.
+ */
+ set_thread(0);
+ status_reset = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &status_reset);
+ CU_ASSERT(rc == 0);
+
+ CU_ASSERT(bdev_ch[0]->flags == 0);
+ CU_ASSERT(bdev_ch[1]->flags == 0);
+ poll_threads();
+ CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_RESET_IN_PROGRESS);
+ CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_RESET_IN_PROGRESS);
+
+ set_thread(0);
+ status0 = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
+ CU_ASSERT(rc == 0);
+
+ set_thread(1);
+ status1 = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
+ CU_ASSERT(rc == 0);
+
+ /*
+ * A reset is in progress so these read I/O should complete with failure. Note that we
+ * need to poll_threads() since I/O completed inline have their completion deferred.
+ */
+ poll_threads();
+ CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
+ CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_FAILED);
+ CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_FAILED);
+
+ /*
+ * Complete the reset
+ */
+ set_thread(0);
+ stub_complete_io(g_bdev.io_target, 0);
+
+ /*
+ * Only poll thread 0. We should not get a completion.
+ */
+ poll_thread(0);
+ CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_PENDING);
+
+ /*
+ * Poll both thread 0 and 1 so the messages can propagate and we
+ * get a completion.
+ */
+ poll_threads();
+ CU_ASSERT(status_reset == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ spdk_put_io_channel(io_ch[0]);
+ set_thread(1);
+ spdk_put_io_channel(io_ch[1]);
+ poll_threads();
+
+ teardown_test();
+}
+
+static void
+basic_qos(void)
+{
+ struct spdk_io_channel *io_ch[2];
+ struct spdk_bdev_channel *bdev_ch[2];
+ struct spdk_bdev *bdev;
+ enum spdk_bdev_io_status status;
+ int rc;
+
+ setup_test();
+
+ /* Enable QoS */
+ bdev = &g_bdev.bdev;
+ bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
+ SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
+ TAILQ_INIT(&bdev->internal.qos->queued);
+ /*
+ * Enable both IOPS and bandwidth rate limits.
+ * In this case, both rate limits will take equal effect.
+ */
+ /* 2000 I/O per second, or 2 per millisecond */
+ bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT].limit = 2000;
+ /* 8K byte per millisecond with 4K block size */
+ bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT].limit = 8192000;
+
+ g_get_io_channel = true;
+
+ set_thread(0);
+ io_ch[0] = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
+ CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
+
+ set_thread(1);
+ io_ch[1] = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
+ CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);
+
+ /*
+ * Send an I/O on thread 0, which is where the QoS thread is running.
+ */
+ set_thread(0);
+ status = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
+ poll_threads();
+ stub_complete_io(g_bdev.io_target, 0);
+ poll_threads();
+ CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ /* Send an I/O on thread 1. The QoS thread is not running here. */
+ status = SPDK_BDEV_IO_STATUS_PENDING;
+ set_thread(1);
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
+ poll_threads();
+ /* Complete I/O on thread 1. This should not complete the I/O we submitted */
+ stub_complete_io(g_bdev.io_target, 0);
+ poll_threads();
+ CU_ASSERT(status == SPDK_BDEV_IO_STATUS_PENDING);
+ /* Now complete I/O on thread 0 */
+ set_thread(0);
+ poll_threads();
+ stub_complete_io(g_bdev.io_target, 0);
+ poll_threads();
+ CU_ASSERT(status == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ /* Tear down the channels */
+ set_thread(0);
+ spdk_put_io_channel(io_ch[0]);
+ set_thread(1);
+ spdk_put_io_channel(io_ch[1]);
+ poll_threads();
+ set_thread(0);
+
+ /* Close the descriptor, which should stop the qos channel */
+ spdk_bdev_close(g_desc);
+ poll_threads();
+ CU_ASSERT(bdev->internal.qos->ch == NULL);
+
+ spdk_bdev_open(bdev, true, NULL, NULL, &g_desc);
+
+ /* Create the channels in reverse order. */
+ set_thread(1);
+ io_ch[1] = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
+ CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);
+
+ set_thread(0);
+ io_ch[0] = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
+ CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
+
+ /* Confirm that the qos thread is now thread 1 */
+ CU_ASSERT(bdev->internal.qos->ch == bdev_ch[1]);
+
+ /* Tear down the channels */
+ set_thread(0);
+ spdk_put_io_channel(io_ch[0]);
+ set_thread(1);
+ spdk_put_io_channel(io_ch[1]);
+ poll_threads();
+
+ set_thread(0);
+
+ teardown_test();
+}
+
+static void
+io_during_qos_queue(void)
+{
+ struct spdk_io_channel *io_ch[2];
+ struct spdk_bdev_channel *bdev_ch[2];
+ struct spdk_bdev *bdev;
+ enum spdk_bdev_io_status status0, status1;
+ int rc;
+
+ setup_test();
+ reset_time();
+
+ /* Enable QoS */
+ bdev = &g_bdev.bdev;
+ bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
+ SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
+ TAILQ_INIT(&bdev->internal.qos->queued);
+ /*
+ * Enable both IOPS and bandwidth rate limits.
+ * In this case, IOPS rate limit will take effect first.
+ */
+ /* 1000 I/O per second, or 1 per millisecond */
+ bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT].limit = 1000;
+ /* 8K byte per millisecond with 4K block size */
+ bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT].limit = 8192000;
+
+ g_get_io_channel = true;
+
+ /* Create channels */
+ set_thread(0);
+ io_ch[0] = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
+ CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
+
+ set_thread(1);
+ io_ch[1] = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
+ CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);
+
+ /* Send two I/O */
+ status1 = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
+ set_thread(0);
+ status0 = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
+
+ /* Complete any I/O that arrived at the disk */
+ poll_threads();
+ set_thread(1);
+ stub_complete_io(g_bdev.io_target, 0);
+ set_thread(0);
+ stub_complete_io(g_bdev.io_target, 0);
+ poll_threads();
+
+ /* Only one of the I/O should complete. (logical XOR) */
+ if (status0 == SPDK_BDEV_IO_STATUS_SUCCESS) {
+ CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
+ } else {
+ CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
+ }
+
+ /* Advance in time by a millisecond */
+ increment_time(1000);
+
+ /* Complete more I/O */
+ poll_threads();
+ set_thread(1);
+ stub_complete_io(g_bdev.io_target, 0);
+ set_thread(0);
+ stub_complete_io(g_bdev.io_target, 0);
+ poll_threads();
+
+ /* Now the second I/O should be done */
+ CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ /* Tear down the channels */
+ set_thread(1);
+ spdk_put_io_channel(io_ch[1]);
+ set_thread(0);
+ spdk_put_io_channel(io_ch[0]);
+ poll_threads();
+
+ teardown_test();
+}
+
+static void
+io_during_qos_reset(void)
+{
+ struct spdk_io_channel *io_ch[2];
+ struct spdk_bdev_channel *bdev_ch[2];
+ struct spdk_bdev *bdev;
+ enum spdk_bdev_io_status status0, status1, reset_status;
+ int rc;
+
+ setup_test();
+ reset_time();
+
+ /* Enable QoS */
+ bdev = &g_bdev.bdev;
+ bdev->internal.qos = calloc(1, sizeof(*bdev->internal.qos));
+ SPDK_CU_ASSERT_FATAL(bdev->internal.qos != NULL);
+ TAILQ_INIT(&bdev->internal.qos->queued);
+ /*
+ * Enable both IOPS and bandwidth rate limits.
+ * In this case, bandwidth rate limit will take effect first.
+ */
+ /* 2000 I/O per second, or 2 per millisecond */
+ bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT].limit = 2000;
+ /* 4K byte per millisecond with 4K block size */
+ bdev->internal.qos->rate_limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT].limit = 4096000;
+
+ g_get_io_channel = true;
+
+ /* Create channels */
+ set_thread(0);
+ io_ch[0] = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
+ CU_ASSERT(bdev_ch[0]->flags == BDEV_CH_QOS_ENABLED);
+
+ set_thread(1);
+ io_ch[1] = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
+ CU_ASSERT(bdev_ch[1]->flags == BDEV_CH_QOS_ENABLED);
+
+ /* Send two I/O. One of these gets queued by QoS. The other is sitting at the disk. */
+ status1 = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &status1);
+ CU_ASSERT(rc == 0);
+ set_thread(0);
+ status0 = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &status0);
+ CU_ASSERT(rc == 0);
+
+ poll_threads();
+ CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_PENDING);
+ CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_PENDING);
+
+ /* Reset the bdev. */
+ reset_status = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_reset(g_desc, io_ch[0], io_during_io_done, &reset_status);
+ CU_ASSERT(rc == 0);
+
+ /* Complete any I/O that arrived at the disk */
+ poll_threads();
+ set_thread(1);
+ stub_complete_io(g_bdev.io_target, 0);
+ set_thread(0);
+ stub_complete_io(g_bdev.io_target, 0);
+ poll_threads();
+
+ CU_ASSERT(reset_status == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(status0 == SPDK_BDEV_IO_STATUS_FAILED);
+ CU_ASSERT(status1 == SPDK_BDEV_IO_STATUS_FAILED);
+
+ /* Tear down the channels */
+ set_thread(1);
+ spdk_put_io_channel(io_ch[1]);
+ set_thread(0);
+ spdk_put_io_channel(io_ch[0]);
+ poll_threads();
+
+ teardown_test();
+}
+
+static void
+enomem_done(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
+{
+ enum spdk_bdev_io_status *status = cb_arg;
+
+ *status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
+ spdk_bdev_free_io(bdev_io);
+}
+
+static void
+enomem(void)
+{
+ struct spdk_io_channel *io_ch;
+ struct spdk_bdev_channel *bdev_ch;
+ struct spdk_bdev_shared_resource *shared_resource;
+ struct ut_bdev_channel *ut_ch;
+ const uint32_t IO_ARRAY_SIZE = 64;
+ const uint32_t AVAIL = 20;
+ enum spdk_bdev_io_status status[IO_ARRAY_SIZE], status_reset;
+ uint32_t nomem_cnt, i;
+ struct spdk_bdev_io *first_io;
+ int rc;
+
+ setup_test();
+
+ set_thread(0);
+ io_ch = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch = spdk_io_channel_get_ctx(io_ch);
+ shared_resource = bdev_ch->shared_resource;
+ ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
+ ut_ch->avail_cnt = AVAIL;
+
+ /* First submit a number of IOs equal to what the channel can support. */
+ for (i = 0; i < AVAIL; i++) {
+ status[i] = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
+ CU_ASSERT(rc == 0);
+ }
+ CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));
+
+ /*
+ * Next, submit one additional I/O. This one should fail with ENOMEM and then go onto
+ * the enomem_io list.
+ */
+ status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));
+ first_io = TAILQ_FIRST(&shared_resource->nomem_io);
+
+ /*
+ * Now submit a bunch more I/O. These should all fail with ENOMEM and get queued behind
+ * the first_io above.
+ */
+ for (i = AVAIL + 1; i < IO_ARRAY_SIZE; i++) {
+ status[i] = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
+ CU_ASSERT(rc == 0);
+ }
+
+ /* Assert that first_io is still at the head of the list. */
+ CU_ASSERT(TAILQ_FIRST(&shared_resource->nomem_io) == first_io);
+ CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == (IO_ARRAY_SIZE - AVAIL));
+ nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io);
+ CU_ASSERT(shared_resource->nomem_threshold == (AVAIL - NOMEM_THRESHOLD_COUNT));
+
+ /*
+ * Complete 1 I/O only. The key check here is bdev_io_tailq_cnt - this should not have
+ * changed since completing just 1 I/O should not trigger retrying the queued nomem_io
+ * list.
+ */
+ stub_complete_io(g_bdev.io_target, 1);
+ CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt);
+
+ /*
+ * Complete enough I/O to hit the nomem_theshold. This should trigger retrying nomem_io,
+ * and we should see I/O get resubmitted to the test bdev module.
+ */
+ stub_complete_io(g_bdev.io_target, NOMEM_THRESHOLD_COUNT - 1);
+ CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) < nomem_cnt);
+ nomem_cnt = bdev_io_tailq_cnt(&shared_resource->nomem_io);
+
+ /* Complete 1 I/O only. This should not trigger retrying the queued nomem_io. */
+ stub_complete_io(g_bdev.io_target, 1);
+ CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == nomem_cnt);
+
+ /*
+ * Send a reset and confirm that all I/O are completed, including the ones that
+ * were queued on the nomem_io list.
+ */
+ status_reset = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_reset(g_desc, io_ch, enomem_done, &status_reset);
+ poll_threads();
+ CU_ASSERT(rc == 0);
+ /* This will complete the reset. */
+ stub_complete_io(g_bdev.io_target, 0);
+
+ CU_ASSERT(bdev_io_tailq_cnt(&shared_resource->nomem_io) == 0);
+ CU_ASSERT(shared_resource->io_outstanding == 0);
+
+ spdk_put_io_channel(io_ch);
+ poll_threads();
+ teardown_test();
+}
+
+static void
+enomem_multi_bdev(void)
+{
+ struct spdk_io_channel *io_ch;
+ struct spdk_bdev_channel *bdev_ch;
+ struct spdk_bdev_shared_resource *shared_resource;
+ struct ut_bdev_channel *ut_ch;
+ const uint32_t IO_ARRAY_SIZE = 64;
+ const uint32_t AVAIL = 20;
+ enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
+ uint32_t i;
+ struct ut_bdev *second_bdev;
+ struct spdk_bdev_desc *second_desc = NULL;
+ struct spdk_bdev_channel *second_bdev_ch;
+ struct spdk_io_channel *second_ch;
+ int rc;
+
+ setup_test();
+
+ /* Register second bdev with the same io_target */
+ second_bdev = calloc(1, sizeof(*second_bdev));
+ SPDK_CU_ASSERT_FATAL(second_bdev != NULL);
+ register_bdev(second_bdev, "ut_bdev2", g_bdev.io_target);
+ spdk_bdev_open(&second_bdev->bdev, true, NULL, NULL, &second_desc);
+ SPDK_CU_ASSERT_FATAL(second_desc != NULL);
+
+ set_thread(0);
+ io_ch = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch = spdk_io_channel_get_ctx(io_ch);
+ shared_resource = bdev_ch->shared_resource;
+ ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
+ ut_ch->avail_cnt = AVAIL;
+
+ second_ch = spdk_bdev_get_io_channel(second_desc);
+ second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
+ SPDK_CU_ASSERT_FATAL(shared_resource == second_bdev_ch->shared_resource);
+
+ /* Saturate io_target through bdev A. */
+ for (i = 0; i < AVAIL; i++) {
+ status[i] = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
+ CU_ASSERT(rc == 0);
+ }
+ CU_ASSERT(TAILQ_EMPTY(&shared_resource->nomem_io));
+
+ /*
+ * Now submit I/O through the second bdev. This should fail with ENOMEM
+ * and then go onto the nomem_io list.
+ */
+ status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&shared_resource->nomem_io));
+
+ /* Complete first bdev's I/O. This should retry sending second bdev's nomem_io */
+ stub_complete_io(g_bdev.io_target, AVAIL);
+
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&shared_resource->nomem_io));
+ CU_ASSERT(shared_resource->io_outstanding == 1);
+
+ /* Now complete our retried I/O */
+ stub_complete_io(g_bdev.io_target, 1);
+ SPDK_CU_ASSERT_FATAL(shared_resource->io_outstanding == 0);
+
+ spdk_put_io_channel(io_ch);
+ spdk_put_io_channel(second_ch);
+ spdk_bdev_close(second_desc);
+ unregister_bdev(second_bdev);
+ poll_threads();
+ free(second_bdev);
+ teardown_test();
+}
+
+
+static void
+enomem_multi_io_target(void)
+{
+ struct spdk_io_channel *io_ch;
+ struct spdk_bdev_channel *bdev_ch;
+ struct ut_bdev_channel *ut_ch;
+ const uint32_t IO_ARRAY_SIZE = 64;
+ const uint32_t AVAIL = 20;
+ enum spdk_bdev_io_status status[IO_ARRAY_SIZE];
+ uint32_t i;
+ int new_io_device;
+ struct ut_bdev *second_bdev;
+ struct spdk_bdev_desc *second_desc = NULL;
+ struct spdk_bdev_channel *second_bdev_ch;
+ struct spdk_io_channel *second_ch;
+ int rc;
+
+ setup_test();
+
+ /* Create new io_target and a second bdev using it */
+ spdk_io_device_register(&new_io_device, stub_create_ch, stub_destroy_ch,
+ sizeof(struct ut_bdev_channel), NULL);
+ second_bdev = calloc(1, sizeof(*second_bdev));
+ SPDK_CU_ASSERT_FATAL(second_bdev != NULL);
+ register_bdev(second_bdev, "ut_bdev2", &new_io_device);
+ spdk_bdev_open(&second_bdev->bdev, true, NULL, NULL, &second_desc);
+ SPDK_CU_ASSERT_FATAL(second_desc != NULL);
+
+ set_thread(0);
+ io_ch = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch = spdk_io_channel_get_ctx(io_ch);
+ ut_ch = spdk_io_channel_get_ctx(bdev_ch->channel);
+ ut_ch->avail_cnt = AVAIL;
+
+ /* Different io_target should imply a different shared_resource */
+ second_ch = spdk_bdev_get_io_channel(second_desc);
+ second_bdev_ch = spdk_io_channel_get_ctx(second_ch);
+ SPDK_CU_ASSERT_FATAL(bdev_ch->shared_resource != second_bdev_ch->shared_resource);
+
+ /* Saturate io_target through bdev A. */
+ for (i = 0; i < AVAIL; i++) {
+ status[i] = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[i]);
+ CU_ASSERT(rc == 0);
+ }
+ CU_ASSERT(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
+
+ /* Issue one more I/O to fill ENOMEM list. */
+ status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
+
+ /*
+ * Now submit I/O through the second bdev. This should go through and complete
+ * successfully because we're using a different io_device underneath.
+ */
+ status[AVAIL] = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(second_desc, second_ch, NULL, 0, 1, enomem_done, &status[AVAIL]);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&second_bdev_ch->shared_resource->nomem_io));
+ stub_complete_io(second_bdev->io_target, 1);
+
+ /* Cleanup; Complete outstanding I/O. */
+ stub_complete_io(g_bdev.io_target, AVAIL);
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
+ /* Complete the ENOMEM I/O */
+ stub_complete_io(g_bdev.io_target, 1);
+ CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0);
+
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&bdev_ch->shared_resource->nomem_io));
+ CU_ASSERT(bdev_ch->shared_resource->io_outstanding == 0);
+ spdk_put_io_channel(io_ch);
+ spdk_put_io_channel(second_ch);
+ spdk_bdev_close(second_desc);
+ unregister_bdev(second_bdev);
+ spdk_io_device_unregister(&new_io_device, NULL);
+ poll_threads();
+ free(second_bdev);
+ teardown_test();
+}
+
+static void
+qos_dynamic_enable_done(void *cb_arg, int status)
+{
+ int *rc = cb_arg;
+ *rc = status;
+}
+
+static void
+qos_dynamic_enable(void)
+{
+ struct spdk_io_channel *io_ch[2];
+ struct spdk_bdev_channel *bdev_ch[2];
+ struct spdk_bdev *bdev;
+ enum spdk_bdev_io_status bdev_io_status[2];
+ uint64_t limits[SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES] = {};
+ int status, second_status, rc, i;
+
+ setup_test();
+ reset_time();
+
+ for (i = 0; i < SPDK_BDEV_QOS_NUM_RATE_LIMIT_TYPES; i++) {
+ limits[i] = UINT64_MAX;
+ }
+
+ bdev = &g_bdev.bdev;
+
+ g_get_io_channel = true;
+
+ /* Create channels */
+ set_thread(0);
+ io_ch[0] = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch[0] = spdk_io_channel_get_ctx(io_ch[0]);
+ CU_ASSERT(bdev_ch[0]->flags == 0);
+
+ set_thread(1);
+ io_ch[1] = spdk_bdev_get_io_channel(g_desc);
+ bdev_ch[1] = spdk_io_channel_get_ctx(io_ch[1]);
+ CU_ASSERT(bdev_ch[1]->flags == 0);
+
+ set_thread(0);
+
+ /*
+ * Enable QoS: IOPS and byte per second rate limits.
+ * More than 10 I/Os allowed per timeslice.
+ */
+ status = -1;
+ limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000;
+ limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 100;
+ spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
+ poll_threads();
+ CU_ASSERT(status == 0);
+ CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
+ CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
+
+ /*
+ * Submit and complete 10 I/O to fill the QoS allotment for this timeslice.
+ * Additional I/O will then be queued.
+ */
+ set_thread(0);
+ for (i = 0; i < 10; i++) {
+ bdev_io_status[0] = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &bdev_io_status[0]);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_PENDING);
+ poll_thread(0);
+ stub_complete_io(g_bdev.io_target, 0);
+ CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_SUCCESS);
+ }
+
+ /*
+ * Send two more I/O. These I/O will be queued since the current timeslice allotment has been
+ * filled already. We want to test that when QoS is disabled that these two I/O:
+ * 1) are not aborted
+ * 2) are sent back to their original thread for resubmission
+ */
+ bdev_io_status[0] = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[0], NULL, 0, 1, io_during_io_done, &bdev_io_status[0]);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_PENDING);
+ set_thread(1);
+ bdev_io_status[1] = SPDK_BDEV_IO_STATUS_PENDING;
+ rc = spdk_bdev_read_blocks(g_desc, io_ch[1], NULL, 0, 1, io_during_io_done, &bdev_io_status[1]);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_PENDING);
+ poll_threads();
+
+ /* Disable QoS: IOPS rate limit */
+ status = -1;
+ limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 0;
+ spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
+ poll_threads();
+ CU_ASSERT(status == 0);
+ CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
+ CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
+
+ /* Disable QoS: Byte per second rate limit */
+ status = -1;
+ limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 0;
+ spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
+ poll_threads();
+ CU_ASSERT(status == 0);
+ CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
+ CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);
+
+ /*
+ * All I/O should have been resubmitted back on their original thread. Complete
+ * all I/O on thread 0, and ensure that only the thread 0 I/O was completed.
+ */
+ set_thread(0);
+ stub_complete_io(g_bdev.io_target, 0);
+ poll_threads();
+ CU_ASSERT(bdev_io_status[0] == SPDK_BDEV_IO_STATUS_SUCCESS);
+ CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_PENDING);
+
+ /* Now complete all I/O on thread 1 and ensure the thread 1 I/O was completed. */
+ set_thread(1);
+ stub_complete_io(g_bdev.io_target, 0);
+ poll_threads();
+ CU_ASSERT(bdev_io_status[1] == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ /* Disable QoS again */
+ status = -1;
+ limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 0;
+ spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
+ poll_threads();
+ CU_ASSERT(status == 0); /* This should succeed */
+ CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
+ CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);
+
+ /* Enable QoS on thread 0 */
+ status = -1;
+ limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000;
+ spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
+ poll_threads();
+ CU_ASSERT(status == 0);
+ CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
+ CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
+
+ /* Disable QoS on thread 1 */
+ set_thread(1);
+ status = -1;
+ limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 0;
+ spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
+ /* Don't poll yet. This should leave the channels with QoS enabled */
+ CU_ASSERT(status == -1);
+ CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
+ CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
+
+ /* Enable QoS. This should immediately fail because the previous disable QoS hasn't completed. */
+ second_status = 0;
+ limits[SPDK_BDEV_QOS_RW_BPS_RATE_LIMIT] = 10;
+ spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &second_status);
+ poll_threads();
+ CU_ASSERT(status == 0); /* The disable should succeed */
+ CU_ASSERT(second_status < 0); /* The enable should fail */
+ CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) == 0);
+ CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) == 0);
+
+ /* Enable QoS on thread 1. This should succeed now that the disable has completed. */
+ status = -1;
+ limits[SPDK_BDEV_QOS_RW_IOPS_RATE_LIMIT] = 10000;
+ spdk_bdev_set_qos_rate_limits(bdev, limits, qos_dynamic_enable_done, &status);
+ poll_threads();
+ CU_ASSERT(status == 0);
+ CU_ASSERT((bdev_ch[0]->flags & BDEV_CH_QOS_ENABLED) != 0);
+ CU_ASSERT((bdev_ch[1]->flags & BDEV_CH_QOS_ENABLED) != 0);
+
+ /* Tear down the channels */
+ set_thread(0);
+ spdk_put_io_channel(io_ch[0]);
+ set_thread(1);
+ spdk_put_io_channel(io_ch[1]);
+ poll_threads();
+
+ set_thread(0);
+ teardown_test();
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("bdev", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "basic", basic) == NULL ||
+ CU_add_test(suite, "unregister_and_close", unregister_and_close) == NULL ||
+ CU_add_test(suite, "basic_qos", basic_qos) == NULL ||
+ CU_add_test(suite, "put_channel_during_reset", put_channel_during_reset) == NULL ||
+ CU_add_test(suite, "aborted_reset", aborted_reset) == NULL ||
+ CU_add_test(suite, "io_during_reset", io_during_reset) == NULL ||
+ CU_add_test(suite, "io_during_qos_queue", io_during_qos_queue) == NULL ||
+ CU_add_test(suite, "io_during_qos_reset", io_during_qos_reset) == NULL ||
+ CU_add_test(suite, "enomem", enomem) == NULL ||
+ CU_add_test(suite, "enomem_multi_bdev", enomem_multi_bdev) == NULL ||
+ CU_add_test(suite, "enomem_multi_io_target", enomem_multi_io_target) == NULL ||
+ CU_add_test(suite, "qos_dynamic_enable", qos_dynamic_enable) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/bdev/part.c/.gitignore b/src/spdk/test/unit/lib/bdev/part.c/.gitignore
new file mode 100644
index 00000000..c8302779
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/part.c/.gitignore
@@ -0,0 +1 @@
+part_ut
diff --git a/src/spdk/test/unit/lib/bdev/part.c/Makefile b/src/spdk/test/unit/lib/bdev/part.c/Makefile
new file mode 100644
index 00000000..9073c5cd
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/part.c/Makefile
@@ -0,0 +1,40 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+
+TEST_FILE = part_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/bdev/part.c/part_ut.c b/src/spdk/test/unit/lib/bdev/part.c/part_ut.c
new file mode 100644
index 00000000..fd251f4c
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/part.c/part_ut.c
@@ -0,0 +1,179 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "common/lib/test_env.c"
+#include "unit/lib/json_mock.c"
+
+#include "spdk/config.h"
+/* HACK: disable VTune integration so the unit test doesn't need VTune headers and libs to build */
+#undef SPDK_CONFIG_VTUNE
+
+#include "bdev/bdev.c"
+#include "bdev/part.c"
+
+DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *, (struct spdk_conf *cp,
+ const char *name), NULL);
+DEFINE_STUB(spdk_conf_section_get_nmval, char *,
+ (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL);
+DEFINE_STUB(spdk_conf_section_get_intval, int, (struct spdk_conf_section *sp, const char *key), -1);
+
+struct spdk_trace_histories *g_trace_histories;
+DEFINE_STUB_V(spdk_trace_add_register_fn, (struct spdk_trace_register_fn *reg_fn));
+DEFINE_STUB_V(spdk_trace_register_owner, (uint8_t type, char id_prefix));
+DEFINE_STUB_V(spdk_trace_register_object, (uint8_t type, char id_prefix));
+DEFINE_STUB_V(spdk_trace_register_description, (const char *name, const char *short_name,
+ uint16_t tpoint_id, uint8_t owner_type,
+ uint8_t object_type, uint8_t new_object,
+ uint8_t arg1_is_ptr, const char *arg1_name));
+DEFINE_STUB_V(_spdk_trace_record, (uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
+ uint32_t size, uint64_t object_id, uint64_t arg1));
+
+static void
+_part_send_msg(spdk_thread_fn fn, void *ctx, void *thread_ctx)
+{
+ fn(ctx);
+}
+
+static void
+_part_cleanup(struct spdk_bdev_part *part)
+{
+ free(part->internal.bdev.name);
+ free(part->internal.bdev.product_name);
+}
+
+void
+spdk_scsi_nvme_translate(const struct spdk_bdev_io *bdev_io,
+ int *sc, int *sk, int *asc, int *ascq)
+{
+}
+
+struct spdk_bdev_module bdev_ut_if = {
+ .name = "bdev_ut",
+};
+
+static void vbdev_ut_examine(struct spdk_bdev *bdev);
+
+struct spdk_bdev_module vbdev_ut_if = {
+ .name = "vbdev_ut",
+ .examine_config = vbdev_ut_examine,
+};
+
+SPDK_BDEV_MODULE_REGISTER(&bdev_ut_if)
+SPDK_BDEV_MODULE_REGISTER(&vbdev_ut_if)
+
+static void
+vbdev_ut_examine(struct spdk_bdev *bdev)
+{
+ spdk_bdev_module_examine_done(&vbdev_ut_if);
+}
+
+static int
+__destruct(void *ctx)
+{
+ return 0;
+}
+
+static struct spdk_bdev_fn_table base_fn_table = {
+ .destruct = __destruct,
+};
+static struct spdk_bdev_fn_table part_fn_table = {
+ .destruct = __destruct,
+};
+
+static void
+part_test(void)
+{
+ struct spdk_bdev_part_base *base;
+ struct spdk_bdev_part part1 = {};
+ struct spdk_bdev_part part2 = {};
+ struct spdk_bdev bdev_base = {};
+ SPDK_BDEV_PART_TAILQ tailq = TAILQ_HEAD_INITIALIZER(tailq);
+ int rc;
+
+ bdev_base.name = "base";
+ bdev_base.fn_table = &base_fn_table;
+ bdev_base.module = &bdev_ut_if;
+ rc = spdk_bdev_register(&bdev_base);
+ CU_ASSERT(rc == 0);
+ base = spdk_bdev_part_base_construct(&bdev_base, NULL, &vbdev_ut_if,
+ &part_fn_table, &tailq, NULL,
+ NULL, 0, NULL, NULL);
+
+ SPDK_CU_ASSERT_FATAL(base != NULL);
+
+ rc = spdk_bdev_part_construct(&part1, base, "test1", 0, 100, "test");
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ rc = spdk_bdev_part_construct(&part2, base, "test2", 100, 100, "test");
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+
+ spdk_bdev_part_base_hotremove(&bdev_base, &tailq);
+
+ spdk_bdev_part_base_free(base);
+ _part_cleanup(&part1);
+ _part_cleanup(&part2);
+ spdk_bdev_unregister(&bdev_base, NULL, NULL);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("bdev_part", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "part", part_test) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ spdk_allocate_thread(_part_send_msg, NULL, NULL, NULL, "thread0");
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ spdk_free_thread();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/bdev/pmem/.gitignore b/src/spdk/test/unit/lib/bdev/pmem/.gitignore
new file mode 100644
index 00000000..b2e0df1e
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/pmem/.gitignore
@@ -0,0 +1 @@
+bdev_pmem_ut
diff --git a/src/spdk/test/unit/lib/bdev/pmem/Makefile b/src/spdk/test/unit/lib/bdev/pmem/Makefile
new file mode 100644
index 00000000..9c0e7dc1
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/pmem/Makefile
@@ -0,0 +1,40 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+
+TEST_FILE = bdev_pmem_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/bdev/pmem/bdev_pmem_ut.c b/src/spdk/test/unit/lib/bdev/pmem/bdev_pmem_ut.c
new file mode 100644
index 00000000..742ec638
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/pmem/bdev_pmem_ut.c
@@ -0,0 +1,783 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "common/lib/test_env.c"
+#include "unit/lib/json_mock.c"
+
+#include "bdev/pmem/bdev_pmem.c"
+
+DEFINE_STUB(spdk_conf_find_section, struct spdk_conf_section *,
+ (struct spdk_conf *cp, const char *name), NULL);
+DEFINE_STUB(spdk_conf_section_get_nval, char *,
+ (struct spdk_conf_section *sp, const char *key, int idx), NULL);
+DEFINE_STUB(spdk_conf_section_get_nmval, char *,
+ (struct spdk_conf_section *sp, const char *key, int idx1, int idx2), NULL);
+
+static struct spdk_bdev_module *g_bdev_pmem_module;
+static int g_bdev_module_cnt;
+
+struct pmemblk {
+ const char *name;
+ bool is_open;
+ bool is_consistent;
+ size_t bsize;
+ long long nblock;
+
+ uint8_t *buffer;
+};
+
+static const char *g_bdev_name = "pmem0";
+
+/* PMEMblkpool is a typedef of struct pmemblk */
+static PMEMblkpool g_pool_ok = {
+ .name = "/pools/ok_pool",
+ .is_open = false,
+ .is_consistent = true,
+ .bsize = 4096,
+ .nblock = 150
+};
+
+static PMEMblkpool g_pool_nblock_0 = {
+ .name = "/pools/nblock_0",
+ .is_open = false,
+ .is_consistent = true,
+ .bsize = 4096,
+ .nblock = 0
+};
+
+static PMEMblkpool g_pool_bsize_0 = {
+ .name = "/pools/nblock_0",
+ .is_open = false,
+ .is_consistent = true,
+ .bsize = 0,
+ .nblock = 100
+};
+
+static PMEMblkpool g_pool_inconsistent = {
+ .name = "/pools/inconsistent",
+ .is_open = false,
+ .is_consistent = false,
+ .bsize = 512,
+ .nblock = 1
+};
+
+static int g_opened_pools;
+static struct spdk_bdev *g_bdev;
+static const char *g_check_version_msg;
+static bool g_pmemblk_open_allow_open = true;
+
+static void
+_pmem_send_msg(spdk_thread_fn fn, void *ctx, void *thread_ctx)
+{
+ fn(ctx);
+}
+
+static PMEMblkpool *
+find_pmemblk_pool(const char *path)
+{
+ if (path == NULL) {
+ errno = EINVAL;
+ return NULL;
+ } else if (strcmp(g_pool_ok.name, path) == 0) {
+ return &g_pool_ok;
+ } else if (strcmp(g_pool_nblock_0.name, path) == 0) {
+ return &g_pool_nblock_0;
+ } else if (strcmp(g_pool_bsize_0.name, path) == 0) {
+ return &g_pool_bsize_0;
+ } else if (strcmp(g_pool_inconsistent.name, path) == 0) {
+ return &g_pool_inconsistent;
+ }
+
+ errno = ENOENT;
+ return NULL;
+}
+
+PMEMblkpool *
+pmemblk_open(const char *path, size_t bsize)
+{
+ PMEMblkpool *pool;
+
+ if (!g_pmemblk_open_allow_open) {
+ errno = EIO;
+ return NULL;
+ }
+
+ pool = find_pmemblk_pool(path);
+ if (!pool) {
+ errno = ENOENT;
+ return NULL;
+ }
+
+ CU_ASSERT_TRUE_FATAL(pool->is_consistent);
+ CU_ASSERT_FALSE(pool->is_open);
+ if (pool->is_open == false) {
+ pool->is_open = true;
+ g_opened_pools++;
+ } else {
+ errno = EBUSY;
+ pool = NULL;
+ }
+
+ return pool;
+}
+void
+spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
+{
+ cb(NULL, bdev_io);
+}
+
+static void
+check_open_pool_fatal(PMEMblkpool *pool)
+{
+ SPDK_CU_ASSERT_FATAL(pool != NULL);
+ SPDK_CU_ASSERT_FATAL(find_pmemblk_pool(pool->name) == pool);
+ SPDK_CU_ASSERT_FATAL(pool->is_open == true);
+}
+
+void
+pmemblk_close(PMEMblkpool *pool)
+{
+ check_open_pool_fatal(pool);
+ pool->is_open = false;
+ CU_ASSERT(g_opened_pools > 0);
+ g_opened_pools--;
+}
+
+size_t
+pmemblk_bsize(PMEMblkpool *pool)
+{
+ check_open_pool_fatal(pool);
+ return pool->bsize;
+}
+
+size_t
+pmemblk_nblock(PMEMblkpool *pool)
+{
+ check_open_pool_fatal(pool);
+ return pool->nblock;
+}
+
+int
+pmemblk_read(PMEMblkpool *pool, void *buf, long long blockno)
+{
+ check_open_pool_fatal(pool);
+ if (blockno >= pool->nblock) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ memcpy(buf, &pool->buffer[blockno * pool->bsize], pool->bsize);
+ return 0;
+}
+
+int
+pmemblk_write(PMEMblkpool *pool, const void *buf, long long blockno)
+{
+ check_open_pool_fatal(pool);
+ if (blockno >= pool->nblock) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ memcpy(&pool->buffer[blockno * pool->bsize], buf, pool->bsize);
+ return 0;
+}
+
+int
+pmemblk_set_zero(PMEMblkpool *pool, long long blockno)
+{
+ check_open_pool_fatal(pool);
+ if (blockno >= pool->nblock) {
+
+ errno = EINVAL;
+ return -1;
+ }
+
+ memset(&pool->buffer[blockno * pool->bsize], 0, pool->bsize);
+ return 0;
+}
+
+const char *
+pmemblk_errormsg(void)
+{
+ return strerror(errno);
+}
+
+const char *
+pmemblk_check_version(unsigned major_required, unsigned minor_required)
+{
+ return g_check_version_msg;
+}
+
+int
+pmemblk_check(const char *path, size_t bsize)
+{
+ PMEMblkpool *pool = find_pmemblk_pool(path);
+
+ if (!pool) {
+ errno = ENOENT;
+ return -1;
+ }
+
+ if (!pool->is_consistent) {
+ /* errno ? */
+ return 0;
+ }
+
+ if (bsize != 0 && pool->bsize != bsize) {
+ /* errno ? */
+ return 0;
+ }
+
+ return 1;
+}
+
+void
+spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
+{
+ bdev_io->internal.status = status;
+}
+
+int
+spdk_bdev_register(struct spdk_bdev *bdev)
+{
+ CU_ASSERT_PTR_NULL(g_bdev);
+ g_bdev = bdev;
+
+ return 0;
+}
+
+void
+spdk_bdev_unregister(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
+{
+}
+
+void
+spdk_bdev_module_finish_done(void)
+{
+}
+
+int
+spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
+{
+ bdev->blockcnt = size;
+ return 0;
+}
+
+static void
+ut_bdev_pmem_destruct(struct spdk_bdev *bdev)
+{
+ SPDK_CU_ASSERT_FATAL(g_bdev != NULL);
+ CU_ASSERT_EQUAL(bdev_pmem_destruct(bdev->ctxt), 0);
+ g_bdev = NULL;
+}
+
+void
+spdk_bdev_module_list_add(struct spdk_bdev_module *bdev_module)
+{
+ g_bdev_pmem_module = bdev_module;
+ g_bdev_module_cnt++;
+}
+
+static int
+bdev_submit_request(struct spdk_bdev *bdev, int16_t io_type, uint64_t offset_blocks,
+ uint64_t num_blocks, struct iovec *iovs, size_t iov_cnt)
+{
+ struct spdk_bdev_io bio = { 0 };
+
+ switch (io_type) {
+ case SPDK_BDEV_IO_TYPE_READ:
+ bio.u.bdev.iovs = iovs;
+ bio.u.bdev.iovcnt = iov_cnt;
+ bio.u.bdev.offset_blocks = offset_blocks;
+ bio.u.bdev.num_blocks = num_blocks;
+ break;
+ case SPDK_BDEV_IO_TYPE_WRITE:
+ bio.u.bdev.iovs = iovs;
+ bio.u.bdev.iovcnt = iov_cnt;
+ bio.u.bdev.offset_blocks = offset_blocks;
+ bio.u.bdev.num_blocks = num_blocks;
+ break;
+ case SPDK_BDEV_IO_TYPE_FLUSH:
+ bio.u.bdev.offset_blocks = offset_blocks;
+ bio.u.bdev.num_blocks = num_blocks;
+ break;
+ case SPDK_BDEV_IO_TYPE_RESET:
+ break;
+ case SPDK_BDEV_IO_TYPE_UNMAP:
+ bio.u.bdev.offset_blocks = offset_blocks;
+ bio.u.bdev.num_blocks = num_blocks;
+ break;
+ case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
+ bio.u.bdev.offset_blocks = offset_blocks;
+ bio.u.bdev.num_blocks = num_blocks;
+ break;
+ default:
+ CU_FAIL_FATAL("BUG:Unexpected IO type");
+ break;
+ }
+
+ /*
+ * Set status to value that shouldn't be returned
+ */
+ bio.type = io_type;
+ bio.internal.status = SPDK_BDEV_IO_STATUS_PENDING;
+ bio.bdev = bdev;
+ bdev_pmem_submit_request(NULL, &bio);
+ return bio.internal.status;
+}
+
+
+static int
+ut_pmem_blk_clean(void)
+{
+ free(g_pool_ok.buffer);
+ g_pool_ok.buffer = NULL;
+
+ /* Unload module to free IO channel */
+ g_bdev_pmem_module->module_fini();
+
+ spdk_free_thread();
+
+ return 0;
+}
+
+static int
+ut_pmem_blk_init(void)
+{
+ errno = 0;
+
+ spdk_allocate_thread(_pmem_send_msg, NULL, NULL, NULL, NULL);
+
+ g_pool_ok.buffer = calloc(g_pool_ok.nblock, g_pool_ok.bsize);
+ if (g_pool_ok.buffer == NULL) {
+ ut_pmem_blk_clean();
+ return -1;
+ }
+
+ return 0;
+}
+
+static void
+ut_pmem_init(void)
+{
+ SPDK_CU_ASSERT_FATAL(g_bdev_pmem_module != NULL);
+ CU_ASSERT_EQUAL(g_bdev_module_cnt, 1);
+
+ /* Make pmemblk_check_version fail with provided error message */
+ g_check_version_msg = "TEST FAIL MESSAGE";
+ CU_ASSERT_NOT_EQUAL(g_bdev_pmem_module->module_init(), 0);
+
+ /* This init must success */
+ g_check_version_msg = NULL;
+ CU_ASSERT_EQUAL(g_bdev_pmem_module->module_init(), 0);
+}
+
+static void
+ut_pmem_open_close(void)
+{
+ struct spdk_bdev *bdev = NULL;
+ int pools_cnt;
+ int rc;
+
+ pools_cnt = g_opened_pools;
+
+ /* Try opening with NULL name */
+ rc = spdk_create_pmem_disk(NULL, NULL, &bdev);
+ CU_ASSERT_PTR_NULL(bdev);
+ CU_ASSERT_EQUAL(pools_cnt, g_opened_pools);
+ CU_ASSERT_NOT_EQUAL(rc, 0);
+
+ /* Open non-existent pool */
+ rc = spdk_create_pmem_disk("non existent pool", NULL, &bdev);
+ CU_ASSERT_PTR_NULL(bdev);
+ CU_ASSERT_EQUAL(pools_cnt, g_opened_pools);
+ CU_ASSERT_NOT_EQUAL(rc, 0);
+
+ /* Open inconsistent pool */
+ rc = spdk_create_pmem_disk(g_pool_inconsistent.name, NULL, &bdev);
+ CU_ASSERT_PTR_NULL(bdev);
+ CU_ASSERT_EQUAL(pools_cnt, g_opened_pools);
+ CU_ASSERT_NOT_EQUAL(rc, 0);
+
+ /* Open consistent pool fail the open from unknown reason. */
+ g_pmemblk_open_allow_open = false;
+ rc = spdk_create_pmem_disk(g_pool_inconsistent.name, NULL, &bdev);
+ g_pmemblk_open_allow_open = true;
+ CU_ASSERT_PTR_NULL(bdev);
+ CU_ASSERT_EQUAL(pools_cnt, g_opened_pools);
+ CU_ASSERT_NOT_EQUAL(rc, 0);
+
+ /* Open pool with nblocks = 0 */
+ rc = spdk_create_pmem_disk(g_pool_nblock_0.name, NULL, &bdev);
+ CU_ASSERT_PTR_NULL(bdev);
+ CU_ASSERT_EQUAL(pools_cnt, g_opened_pools);
+ CU_ASSERT_NOT_EQUAL(rc, 0);
+
+ /* Open pool with bsize = 0 */
+ rc = spdk_create_pmem_disk(g_pool_bsize_0.name, NULL, &bdev);
+ CU_ASSERT_PTR_NULL(bdev);
+ CU_ASSERT_EQUAL(pools_cnt, g_opened_pools);
+ CU_ASSERT_NOT_EQUAL(rc, 0);
+
+ /* Open pool with NULL name */
+ rc = spdk_create_pmem_disk(g_pool_ok.name, NULL, &bdev);
+ CU_ASSERT_PTR_NULL(bdev);
+ CU_ASSERT_EQUAL(pools_cnt, g_opened_pools);
+ CU_ASSERT_NOT_EQUAL(rc, 0);
+
+ /* Open good pool */
+ rc = spdk_create_pmem_disk(g_pool_ok.name, g_bdev_name, &bdev);
+ SPDK_CU_ASSERT_FATAL(bdev != NULL);
+ CU_ASSERT_TRUE(g_pool_ok.is_open);
+ CU_ASSERT_EQUAL(pools_cnt + 1, g_opened_pools);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ /* Now remove this bdev */
+ ut_bdev_pmem_destruct(bdev);
+ CU_ASSERT_FALSE(g_pool_ok.is_open);
+ CU_ASSERT_EQUAL(pools_cnt, g_opened_pools);
+}
+
+static void
+ut_pmem_write_read(void)
+{
+ uint8_t *write_buf, *read_buf;
+ struct spdk_bdev *bdev;
+ int rc;
+ size_t unaligned_aligned_size = 100;
+ size_t buf_size = g_pool_ok.bsize * g_pool_ok.nblock;
+ size_t i;
+ const uint64_t nblock_offset = 10;
+ uint64_t offset;
+ size_t io_size, nblock, total_io_size, bsize;
+
+ bsize = 4096;
+ struct iovec iov[] = {
+ { 0, 2 * bsize },
+ { 0, 3 * bsize },
+ { 0, 4 * bsize },
+ };
+
+ rc = spdk_create_pmem_disk(g_pool_ok.name, g_bdev_name, &bdev);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ SPDK_CU_ASSERT_FATAL(g_pool_ok.nblock > 40);
+
+ write_buf = calloc(1, buf_size);
+ read_buf = calloc(1, buf_size);
+
+ SPDK_CU_ASSERT_FATAL(bdev != NULL);
+ SPDK_CU_ASSERT_FATAL(write_buf != NULL);
+ SPDK_CU_ASSERT_FATAL(read_buf != NULL);
+
+ total_io_size = 0;
+ offset = nblock_offset * g_pool_ok.bsize;
+ for (i = 0; i < 3; i++) {
+ iov[i].iov_base = &write_buf[offset + total_io_size];
+ total_io_size += iov[i].iov_len;
+ }
+
+ for (i = 0; i < total_io_size + unaligned_aligned_size; i++) {
+ write_buf[offset + i] = 0x42 + i;
+ }
+
+ SPDK_CU_ASSERT_FATAL(total_io_size < buf_size);
+
+ /*
+ * Write outside pool.
+ */
+ rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_WRITE, g_pool_ok.nblock, 1, &iov[0], 2);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_FAILED);
+
+ /*
+ * Write with insufficient IOV buffers length.
+ */
+ rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_WRITE, 0, g_pool_ok.nblock, &iov[0], 2);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_FAILED);
+
+ /*
+ * Try to write two IOV with first one iov_len % bsize != 0.
+ */
+ io_size = iov[0].iov_len + iov[1].iov_len;
+ nblock = io_size / g_pool_ok.bsize;
+ iov[0].iov_len += unaligned_aligned_size;
+ rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_WRITE, 0, nblock, &iov[0], 2);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_FAILED);
+ iov[0].iov_len -= unaligned_aligned_size;
+
+ /*
+ * Try to write one IOV.
+ */
+ nblock = iov[0].iov_len / g_pool_ok.bsize;
+ rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_WRITE, nblock_offset, nblock, &iov[0], 1);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ /*
+ * Try to write 2 IOV.
+ * Sum of IOV length is larger than IO size and last IOV is larger and iov_len % bsize != 0
+ */
+ offset = iov[0].iov_len / g_pool_ok.bsize;
+ io_size = iov[1].iov_len + iov[2].iov_len;
+ nblock = io_size / g_pool_ok.bsize;
+ iov[2].iov_len += unaligned_aligned_size;
+ rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_WRITE, nblock_offset + offset, nblock,
+ &iov[1], 2);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_SUCCESS);
+ iov[2].iov_len -= unaligned_aligned_size;
+
+ /*
+ * Examine pool state:
+ * 1. Written area should have expected values.
+ * 2. Anything else should contain zeros.
+ */
+ offset = nblock_offset * g_pool_ok.bsize + total_io_size;
+ rc = memcmp(&g_pool_ok.buffer[0], write_buf, offset);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ for (i = offset; i < buf_size; i++) {
+ if (g_pool_ok.buffer[i] != 0) {
+ CU_ASSERT_EQUAL(g_pool_ok.buffer[i], 0);
+ break;
+ }
+ }
+
+ /* Setup IOV for reads */
+ memset(read_buf, 0xAB, buf_size);
+ offset = nblock_offset * g_pool_ok.bsize;
+ for (i = 0; i < 3; i++) {
+ iov[i].iov_base = &read_buf[offset];
+ offset += iov[i].iov_len;
+ }
+
+ /*
+ * Write outside pool.
+ */
+ rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_READ, g_pool_ok.nblock, 1, &iov[0], 2);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_FAILED);
+
+ /*
+ * Read with insufficient IOV buffers length.
+ */
+ rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_READ, 0, g_pool_ok.nblock, &iov[0], 2);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_FAILED);
+
+ /*
+ * Try to read two IOV with first one iov_len % bsize != 0.
+ */
+ io_size = iov[0].iov_len + iov[1].iov_len;
+ nblock = io_size / g_pool_ok.bsize;
+ iov[0].iov_len += unaligned_aligned_size;
+ rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_READ, 0, nblock, &iov[0], 2);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_FAILED);
+ iov[0].iov_len -= unaligned_aligned_size;
+
+ /*
+ * Try to write one IOV.
+ */
+ nblock = iov[0].iov_len / g_pool_ok.bsize;
+ rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_READ, nblock_offset, nblock, &iov[0], 1);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ /*
+ * Try to read 2 IOV.
+ * Sum of IOV length is larger than IO size and last IOV is larger and iov_len % bsize != 0
+ */
+ offset = iov[0].iov_len / g_pool_ok.bsize;
+ io_size = iov[1].iov_len + iov[2].iov_len;
+ nblock = io_size / g_pool_ok.bsize;
+ iov[2].iov_len += unaligned_aligned_size;
+ rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_READ, nblock_offset + offset, nblock,
+ &iov[1], 2);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_SUCCESS);
+ iov[2].iov_len -= unaligned_aligned_size;
+
+
+ /*
+ * Examine what we read state:
+ * 1. Written area should have expected values.
+ * 2. Anything else should contain zeros.
+ */
+ offset = nblock_offset * g_pool_ok.bsize;
+ for (i = 0; i < offset; i++) {
+ if (read_buf[i] != 0xAB) {
+ CU_ASSERT_EQUAL(read_buf[i], 0xAB);
+ break;
+ }
+ }
+
+ rc = memcmp(&read_buf[offset], &write_buf[offset], total_io_size);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ offset += total_io_size;
+ for (i = offset; i < buf_size; i++) {
+ if (read_buf[i] != 0xAB) {
+ CU_ASSERT_EQUAL(read_buf[i], 0xAB);
+ break;
+ }
+ }
+
+ memset(g_pool_ok.buffer, 0, g_pool_ok.bsize * g_pool_ok.nblock);
+ free(write_buf);
+ free(read_buf);
+
+ /* Now remove this bdev */
+ ut_bdev_pmem_destruct(bdev);
+ CU_ASSERT_FALSE(g_pool_ok.is_open);
+ CU_ASSERT_EQUAL(g_opened_pools, 0);
+}
+
+static void
+ut_pmem_reset(void)
+{
+ struct spdk_bdev *bdev;
+ int rc;
+
+ rc = spdk_create_pmem_disk(g_pool_ok.name, g_bdev_name, &bdev);
+ CU_ASSERT_EQUAL(rc, 0);
+ SPDK_CU_ASSERT_FATAL(bdev != NULL);
+
+ rc = bdev_submit_request(bdev, SPDK_BDEV_IO_TYPE_RESET, 0, 0, NULL, 0);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ ut_bdev_pmem_destruct(bdev);
+}
+
+static void
+ut_pmem_unmap_write_zero(int16_t io_type)
+{
+ struct spdk_bdev *bdev;
+ size_t buff_size = g_pool_ok.nblock * g_pool_ok.bsize;
+ size_t i;
+ uint8_t *buffer;
+ int rc;
+
+ CU_ASSERT(io_type == SPDK_BDEV_IO_TYPE_UNMAP || io_type == SPDK_BDEV_IO_TYPE_WRITE_ZEROES);
+ rc = spdk_create_pmem_disk(g_pool_ok.name, g_bdev_name, &bdev);
+ CU_ASSERT_EQUAL(rc, 0);
+ SPDK_CU_ASSERT_FATAL(bdev != NULL);
+ SPDK_CU_ASSERT_FATAL(g_pool_ok.nblock > 40);
+
+ buffer = calloc(1, buff_size);
+ SPDK_CU_ASSERT_FATAL(buffer != NULL);
+
+ for (i = 10 * g_pool_ok.bsize; i < 30 * g_pool_ok.bsize; i++) {
+ buffer[i] = 0x30 + io_type + i;
+ }
+ memcpy(g_pool_ok.buffer, buffer, buff_size);
+
+ /*
+ * Block outside of pool.
+ */
+ rc = bdev_submit_request(bdev, io_type, g_pool_ok.nblock, 1, NULL, 0);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_FAILED);
+
+ rc = memcmp(buffer, g_pool_ok.buffer, buff_size);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ /*
+ * Blocks 15 to 25
+ */
+ memset(&buffer[15 * g_pool_ok.bsize], 0, 10 * g_pool_ok.bsize);
+ rc = bdev_submit_request(bdev, io_type, 15, 10, NULL, 0);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ rc = memcmp(buffer, g_pool_ok.buffer, buff_size);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ /*
+ * All blocks.
+ */
+ memset(buffer, 0, buff_size);
+ rc = bdev_submit_request(bdev, io_type, 0, g_pool_ok.nblock, NULL, 0);
+ CU_ASSERT_EQUAL(rc, SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ rc = memcmp(buffer, g_pool_ok.buffer, buff_size);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ /* Now remove this bdev */
+ ut_bdev_pmem_destruct(bdev);
+ CU_ASSERT_FALSE(g_pool_ok.is_open);
+ CU_ASSERT_EQUAL(g_opened_pools, 0);
+
+ free(buffer);
+}
+
+static void
+ut_pmem_write_zero(void)
+{
+ ut_pmem_unmap_write_zero(SPDK_BDEV_IO_TYPE_WRITE_ZEROES);
+}
+
+static void
+ut_pmem_unmap(void)
+{
+ ut_pmem_unmap_write_zero(SPDK_BDEV_IO_TYPE_UNMAP);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("bdev_pmem", ut_pmem_blk_init, ut_pmem_blk_clean);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "ut_pmem_init", ut_pmem_init) == NULL ||
+ CU_add_test(suite, "ut_pmem_open_close", ut_pmem_open_close) == NULL ||
+ CU_add_test(suite, "ut_pmem_write_read", ut_pmem_write_read) == NULL ||
+ CU_add_test(suite, "ut_pmem_reset", ut_pmem_reset) == NULL ||
+ CU_add_test(suite, "ut_pmem_write_zero", ut_pmem_write_zero) == NULL ||
+ CU_add_test(suite, "ut_pmem_unmap", ut_pmem_unmap) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/bdev/scsi_nvme.c/.gitignore b/src/spdk/test/unit/lib/bdev/scsi_nvme.c/.gitignore
new file mode 100644
index 00000000..75800527
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/scsi_nvme.c/.gitignore
@@ -0,0 +1 @@
+scsi_nvme_ut
diff --git a/src/spdk/test/unit/lib/bdev/scsi_nvme.c/Makefile b/src/spdk/test/unit/lib/bdev/scsi_nvme.c/Makefile
new file mode 100644
index 00000000..0c908148
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/scsi_nvme.c/Makefile
@@ -0,0 +1,39 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) 2016 FUJITSU LIMITED, All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of the copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+
+TEST_FILE = scsi_nvme_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/bdev/scsi_nvme.c/scsi_nvme_ut.c b/src/spdk/test/unit/lib/bdev/scsi_nvme.c/scsi_nvme_ut.c
new file mode 100644
index 00000000..9b2eff35
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/scsi_nvme.c/scsi_nvme_ut.c
@@ -0,0 +1,142 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 FUJITSU LIMITED, All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "bdev/scsi_nvme.c"
+
+static int
+null_init(void)
+{
+ return 0;
+}
+
+static int
+null_clean(void)
+{
+ return 0;
+}
+
+static void
+scsi_nvme_translate_test(void)
+{
+ struct spdk_bdev_io bdev_io;
+ int sc, sk, asc, ascq;
+
+ /* SPDK_NVME_SCT_GENERIC */
+ bdev_io.internal.error.nvme.sct = SPDK_NVME_SCT_GENERIC;
+ bdev_io.internal.error.nvme.sc = SPDK_NVME_SC_ABORTED_POWER_LOSS;
+ spdk_scsi_nvme_translate(&bdev_io, &sc, &sk, &asc, &ascq);
+ CU_ASSERT_EQUAL(sc, SPDK_SCSI_STATUS_TASK_ABORTED);
+ CU_ASSERT_EQUAL(sk, SPDK_SCSI_SENSE_ABORTED_COMMAND);
+ CU_ASSERT_EQUAL(asc, SPDK_SCSI_ASC_WARNING);
+ CU_ASSERT_EQUAL(ascq, SPDK_SCSI_ASCQ_POWER_LOSS_EXPECTED);
+
+ bdev_io.internal.error.nvme.sc = SPDK_NVME_SC_INVALID_NUM_SGL_DESCIRPTORS;
+ spdk_scsi_nvme_translate(&bdev_io, &sc, &sk, &asc, &ascq);
+ CU_ASSERT_EQUAL(sc, SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT_EQUAL(sk, SPDK_SCSI_SENSE_ILLEGAL_REQUEST);
+ CU_ASSERT_EQUAL(asc, SPDK_SCSI_ASC_NO_ADDITIONAL_SENSE);
+ CU_ASSERT_EQUAL(ascq, SPDK_SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+
+ /* SPDK_NVME_SCT_COMMAND_SPECIFIC */
+ bdev_io.internal.error.nvme.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
+ bdev_io.internal.error.nvme.sc = SPDK_NVME_SC_INVALID_FORMAT;
+ spdk_scsi_nvme_translate(&bdev_io, &sc, &sk, &asc, &ascq);
+ CU_ASSERT_EQUAL(sc, SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT_EQUAL(sk, SPDK_SCSI_SENSE_ILLEGAL_REQUEST);
+ CU_ASSERT_EQUAL(asc, SPDK_SCSI_ASC_FORMAT_COMMAND_FAILED);
+ CU_ASSERT_EQUAL(ascq, SPDK_SCSI_ASCQ_FORMAT_COMMAND_FAILED);
+
+ bdev_io.internal.error.nvme.sc = SPDK_NVME_SC_OVERLAPPING_RANGE;
+ spdk_scsi_nvme_translate(&bdev_io, &sc, &sk, &asc, &ascq);
+ CU_ASSERT_EQUAL(sc, SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT_EQUAL(sk, SPDK_SCSI_SENSE_ILLEGAL_REQUEST);
+ CU_ASSERT_EQUAL(asc, SPDK_SCSI_ASC_NO_ADDITIONAL_SENSE);
+ CU_ASSERT_EQUAL(ascq, SPDK_SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+
+ /* SPDK_NVME_SCT_MEDIA_ERROR */
+ bdev_io.internal.error.nvme.sct = SPDK_NVME_SCT_MEDIA_ERROR;
+ bdev_io.internal.error.nvme.sc = SPDK_NVME_SC_GUARD_CHECK_ERROR;
+ spdk_scsi_nvme_translate(&bdev_io, &sc, &sk, &asc, &ascq);
+ CU_ASSERT_EQUAL(sc, SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT_EQUAL(sk, SPDK_SCSI_SENSE_MEDIUM_ERROR);
+ CU_ASSERT_EQUAL(asc, SPDK_SCSI_ASC_LOGICAL_BLOCK_GUARD_CHECK_FAILED);
+ CU_ASSERT_EQUAL(ascq, SPDK_SCSI_ASCQ_LOGICAL_BLOCK_GUARD_CHECK_FAILED);
+
+ bdev_io.internal.error.nvme.sc = SPDK_NVME_SC_DEALLOCATED_OR_UNWRITTEN_BLOCK;
+ spdk_scsi_nvme_translate(&bdev_io, &sc, &sk, &asc, &ascq);
+ CU_ASSERT_EQUAL(sc, SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT_EQUAL(sk, SPDK_SCSI_SENSE_ILLEGAL_REQUEST);
+ CU_ASSERT_EQUAL(asc, SPDK_SCSI_ASC_NO_ADDITIONAL_SENSE);
+ CU_ASSERT_EQUAL(ascq, SPDK_SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+
+ /* SPDK_NVME_SCT_VENDOR_SPECIFIC */
+ bdev_io.internal.error.nvme.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
+ bdev_io.internal.error.nvme.sc = 0xff;
+ spdk_scsi_nvme_translate(&bdev_io, &sc, &sk, &asc, &ascq);
+ CU_ASSERT_EQUAL(sc, SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT_EQUAL(sk, SPDK_SCSI_SENSE_ILLEGAL_REQUEST);
+ CU_ASSERT_EQUAL(asc, SPDK_SCSI_ASC_NO_ADDITIONAL_SENSE);
+ CU_ASSERT_EQUAL(ascq, SPDK_SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("scsi_nvme_suite", null_init, null_clean);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "scsi_nvme - translate nvme error to scsi error",
+ scsi_nvme_translate_test) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/bdev/vbdev_lvol.c/.gitignore b/src/spdk/test/unit/lib/bdev/vbdev_lvol.c/.gitignore
new file mode 100644
index 00000000..5f2f6fdf
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/vbdev_lvol.c/.gitignore
@@ -0,0 +1 @@
+vbdev_lvol_ut
diff --git a/src/spdk/test/unit/lib/bdev/vbdev_lvol.c/Makefile b/src/spdk/test/unit/lib/bdev/vbdev_lvol.c/Makefile
new file mode 100644
index 00000000..c2e6b99e
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/vbdev_lvol.c/Makefile
@@ -0,0 +1,40 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../../)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+
+TEST_FILE = vbdev_lvol_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/bdev/vbdev_lvol.c/vbdev_lvol_ut.c b/src/spdk/test/unit/lib/bdev/vbdev_lvol.c/vbdev_lvol_ut.c
new file mode 100644
index 00000000..2500378b
--- /dev/null
+++ b/src/spdk/test/unit/lib/bdev/vbdev_lvol.c/vbdev_lvol_ut.c
@@ -0,0 +1,1410 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+#include "spdk/string.h"
+
+#include "bdev/lvol/vbdev_lvol.c"
+
+#define SPDK_BS_PAGE_SIZE 0x1000
+
+int g_lvolerrno;
+int g_lvserrno;
+int g_cluster_size;
+int g_registered_bdevs;
+int g_num_lvols = 0;
+struct spdk_lvol_store *g_lvs = NULL;
+struct spdk_lvol *g_lvol = NULL;
+struct lvol_store_bdev *g_lvs_bdev = NULL;
+struct spdk_bdev *g_base_bdev = NULL;
+struct spdk_bdev_io *g_io = NULL;
+struct spdk_io_channel *g_ch = NULL;
+struct lvol_task *g_task = NULL;
+
+static struct spdk_bdev g_bdev = {};
+static struct spdk_lvol_store *g_lvol_store = NULL;
+bool lvol_store_initialize_fail = false;
+bool lvol_store_initialize_cb_fail = false;
+bool lvol_already_opened = false;
+bool g_examine_done = false;
+bool g_bdev_alias_already_exists = false;
+bool g_lvs_with_name_already_exists = false;
+bool g_lvol_deletable = true;
+
+int
+spdk_bdev_alias_add(struct spdk_bdev *bdev, const char *alias)
+{
+ struct spdk_bdev_alias *tmp;
+
+ CU_ASSERT(alias != NULL);
+ CU_ASSERT(bdev != NULL);
+ if (g_bdev_alias_already_exists) {
+ return -EEXIST;
+ }
+
+ tmp = calloc(1, sizeof(*tmp));
+ SPDK_CU_ASSERT_FATAL(tmp != NULL);
+
+ tmp->alias = strdup(alias);
+ SPDK_CU_ASSERT_FATAL(tmp->alias != NULL);
+
+ TAILQ_INSERT_TAIL(&bdev->aliases, tmp, tailq);
+
+ return 0;
+}
+
+int
+spdk_bdev_alias_del(struct spdk_bdev *bdev, const char *alias)
+{
+ struct spdk_bdev_alias *tmp;
+
+ CU_ASSERT(alias != NULL);
+ CU_ASSERT(bdev != NULL);
+
+ TAILQ_FOREACH(tmp, &bdev->aliases, tailq) {
+ if (strncmp(alias, tmp->alias, SPDK_LVOL_NAME_MAX) == 0) {
+ TAILQ_REMOVE(&bdev->aliases, tmp, tailq);
+ free(tmp->alias);
+ free(tmp);
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
+void
+spdk_bdev_alias_del_all(struct spdk_bdev *bdev)
+{
+ struct spdk_bdev_alias *p, *tmp;
+
+ TAILQ_FOREACH_SAFE(p, &bdev->aliases, tailq, tmp) {
+ TAILQ_REMOVE(&bdev->aliases, p, tailq);
+ free(p->alias);
+ free(p);
+ }
+}
+
+void
+spdk_bdev_destruct_done(struct spdk_bdev *bdev, int bdeverrno)
+{
+}
+
+void
+spdk_lvs_rename(struct spdk_lvol_store *lvs, const char *new_name,
+ spdk_lvs_op_complete cb_fn, void *cb_arg)
+{
+ if (g_lvs_with_name_already_exists) {
+ g_lvolerrno = -EEXIST;
+ } else {
+ snprintf(lvs->name, sizeof(lvs->name), "%s", new_name);
+ g_lvolerrno = 0;
+ }
+
+ cb_fn(cb_arg, g_lvolerrno);
+}
+
+void
+spdk_lvol_rename(struct spdk_lvol *lvol, const char *new_name,
+ spdk_lvol_op_complete cb_fn, void *cb_arg)
+{
+ struct spdk_lvol *tmp;
+
+ if (strncmp(lvol->name, new_name, SPDK_LVOL_NAME_MAX) == 0) {
+ cb_fn(cb_arg, 0);
+ return;
+ }
+
+ TAILQ_FOREACH(tmp, &lvol->lvol_store->lvols, link) {
+ if (strncmp(tmp->name, new_name, SPDK_LVOL_NAME_MAX) == 0) {
+ SPDK_ERRLOG("Lvol %s already exists in lvol store %s\n", new_name, lvol->lvol_store->name);
+ cb_fn(cb_arg, -EEXIST);
+ return;
+ }
+ }
+
+ snprintf(lvol->name, sizeof(lvol->name), "%s", new_name);
+
+ cb_fn(cb_arg, g_lvolerrno);
+}
+
+void
+spdk_lvol_open(struct spdk_lvol *lvol, spdk_lvol_op_with_handle_complete cb_fn, void *cb_arg)
+{
+ cb_fn(cb_arg, lvol, g_lvolerrno);
+}
+
+uint64_t
+spdk_blob_get_num_clusters(struct spdk_blob *b)
+{
+ return 0;
+}
+
+int
+spdk_blob_get_clones(struct spdk_blob_store *bs, spdk_blob_id blobid, spdk_blob_id *ids,
+ size_t *count)
+{
+ *count = 0;
+ return 0;
+}
+
+spdk_blob_id
+spdk_blob_get_parent_snapshot(struct spdk_blob_store *bs, spdk_blob_id blobid)
+{
+ return 0;
+}
+
+bool g_blob_is_read_only = false;
+
+bool
+spdk_blob_is_read_only(struct spdk_blob *blob)
+{
+ return g_blob_is_read_only;
+}
+
+bool
+spdk_blob_is_snapshot(struct spdk_blob *blob)
+{
+ return false;
+}
+
+bool
+spdk_blob_is_clone(struct spdk_blob *blob)
+{
+ return false;
+}
+
+bool
+spdk_blob_is_thin_provisioned(struct spdk_blob *blob)
+{
+ return false;
+}
+
+static struct spdk_lvol *_lvol_create(struct spdk_lvol_store *lvs);
+
+void
+spdk_lvs_load(struct spdk_bs_dev *dev,
+ spdk_lvs_op_with_handle_complete cb_fn, void *cb_arg)
+{
+ struct spdk_lvol_store *lvs = NULL;
+ int i;
+ int lvserrno = g_lvserrno;
+
+ if (lvserrno != 0) {
+ /* On error blobstore destroys bs_dev itself,
+ * by puttin back io channels.
+ * This operation is asynchronous, and completed
+ * after calling the callback for lvol. */
+ cb_fn(cb_arg, g_lvol_store, lvserrno);
+ dev->destroy(dev);
+ return;
+ }
+
+ lvs = calloc(1, sizeof(*lvs));
+ SPDK_CU_ASSERT_FATAL(lvs != NULL);
+ TAILQ_INIT(&lvs->lvols);
+ TAILQ_INIT(&lvs->pending_lvols);
+ spdk_uuid_generate(&lvs->uuid);
+ lvs->bs_dev = dev;
+ for (i = 0; i < g_num_lvols; i++) {
+ _lvol_create(lvs);
+ }
+
+ cb_fn(cb_arg, lvs, lvserrno);
+}
+
+int
+spdk_bs_bdev_claim(struct spdk_bs_dev *bs_dev, struct spdk_bdev_module *module)
+{
+ if (lvol_already_opened == true) {
+ return -1;
+ }
+
+ lvol_already_opened = true;
+
+ return 0;
+}
+
+void
+spdk_bdev_unregister(struct spdk_bdev *vbdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
+{
+ int rc;
+
+ SPDK_CU_ASSERT_FATAL(vbdev != NULL);
+ rc = vbdev->fn_table->destruct(vbdev->ctxt);
+
+ SPDK_CU_ASSERT_FATAL(cb_fn != NULL);
+ cb_fn(cb_arg, rc);
+}
+
+void
+spdk_bdev_module_finish_done(void)
+{
+ return;
+}
+
+uint64_t
+spdk_bs_get_page_size(struct spdk_blob_store *bs)
+{
+ return SPDK_BS_PAGE_SIZE;
+}
+
+uint64_t
+spdk_bs_get_io_unit_size(struct spdk_blob_store *bs)
+{
+ return SPDK_BS_PAGE_SIZE;
+}
+
+static void
+bdev_blob_destroy(struct spdk_bs_dev *bs_dev)
+{
+ CU_ASSERT(bs_dev != NULL);
+ free(bs_dev);
+ lvol_already_opened = false;
+}
+
+struct spdk_bs_dev *
+spdk_bdev_create_bs_dev(struct spdk_bdev *bdev, spdk_bdev_remove_cb_t remove_cb, void *remove_ctx)
+{
+ struct spdk_bs_dev *bs_dev;
+
+ if (lvol_already_opened == true || bdev == NULL) {
+ return NULL;
+ }
+
+ bs_dev = calloc(1, sizeof(*bs_dev));
+ SPDK_CU_ASSERT_FATAL(bs_dev != NULL);
+ bs_dev->destroy = bdev_blob_destroy;
+
+ return bs_dev;
+}
+
+void
+spdk_lvs_opts_init(struct spdk_lvs_opts *opts)
+{
+}
+
+int
+spdk_lvs_init(struct spdk_bs_dev *bs_dev, struct spdk_lvs_opts *o,
+ spdk_lvs_op_with_handle_complete cb_fn, void *cb_arg)
+{
+ struct spdk_lvol_store *lvs;
+ int error = 0;
+
+ if (lvol_store_initialize_fail) {
+ return -1;
+ }
+
+ if (lvol_store_initialize_cb_fail) {
+ bs_dev->destroy(bs_dev);
+ lvs = NULL;
+ error = -1;
+ } else {
+ lvs = calloc(1, sizeof(*lvs));
+ SPDK_CU_ASSERT_FATAL(lvs != NULL);
+ TAILQ_INIT(&lvs->lvols);
+ TAILQ_INIT(&lvs->pending_lvols);
+ spdk_uuid_generate(&lvs->uuid);
+ snprintf(lvs->name, sizeof(lvs->name), "%s", o->name);
+ lvs->bs_dev = bs_dev;
+ error = 0;
+ }
+ cb_fn(cb_arg, lvs, error);
+
+ return 0;
+}
+
+int
+spdk_lvs_unload(struct spdk_lvol_store *lvs, spdk_lvs_op_complete cb_fn, void *cb_arg)
+{
+ struct spdk_lvol *lvol, *tmp;
+
+ TAILQ_FOREACH_SAFE(lvol, &lvs->lvols, link, tmp) {
+ TAILQ_REMOVE(&lvs->lvols, lvol, link);
+ free(lvol->unique_id);
+ free(lvol);
+ }
+ g_lvol_store = NULL;
+
+ lvs->bs_dev->destroy(lvs->bs_dev);
+ free(lvs);
+
+ if (cb_fn != NULL) {
+ cb_fn(cb_arg, 0);
+ }
+
+ return 0;
+}
+
+int
+spdk_lvs_destroy(struct spdk_lvol_store *lvs, spdk_lvs_op_complete cb_fn,
+ void *cb_arg)
+{
+ struct spdk_lvol *lvol, *tmp;
+ char *alias;
+
+ TAILQ_FOREACH_SAFE(lvol, &lvs->lvols, link, tmp) {
+ TAILQ_REMOVE(&lvs->lvols, lvol, link);
+
+ alias = spdk_sprintf_alloc("%s/%s", lvs->name, lvol->name);
+ if (alias == NULL) {
+ SPDK_ERRLOG("Cannot alloc memory for alias\n");
+ return -1;
+ }
+ spdk_bdev_alias_del(lvol->bdev, alias);
+
+ free(alias);
+ free(lvol->unique_id);
+ free(lvol);
+ }
+ g_lvol_store = NULL;
+
+ lvs->bs_dev->destroy(lvs->bs_dev);
+ free(lvs);
+
+ if (cb_fn != NULL) {
+ cb_fn(cb_arg, 0);
+ }
+
+ return 0;
+}
+
+void
+spdk_lvol_resize(struct spdk_lvol *lvol, size_t sz, spdk_lvol_op_complete cb_fn, void *cb_arg)
+{
+ cb_fn(cb_arg, 0);
+}
+
+int
+spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
+{
+ bdev->blockcnt = size;
+ return 0;
+}
+
+uint64_t
+spdk_bs_get_cluster_size(struct spdk_blob_store *bs)
+{
+ return g_cluster_size;
+}
+
+struct spdk_bdev *
+spdk_bdev_get_by_name(const char *bdev_name)
+{
+ if (!strcmp(g_base_bdev->name, bdev_name)) {
+ return g_base_bdev;
+ }
+
+ return NULL;
+}
+
+void
+spdk_lvol_close(struct spdk_lvol *lvol, spdk_lvol_op_complete cb_fn, void *cb_arg)
+{
+ lvol->ref_count--;
+
+ SPDK_CU_ASSERT_FATAL(cb_fn != NULL);
+ cb_fn(cb_arg, 0);
+}
+
+bool
+spdk_lvol_deletable(struct spdk_lvol *lvol)
+{
+ return g_lvol_deletable;
+}
+
+void
+spdk_lvol_destroy(struct spdk_lvol *lvol, spdk_lvol_op_complete cb_fn, void *cb_arg)
+{
+ if (lvol->ref_count != 0) {
+ cb_fn(cb_arg, -ENODEV);
+ }
+
+ TAILQ_REMOVE(&lvol->lvol_store->lvols, lvol, link);
+
+ SPDK_CU_ASSERT_FATAL(cb_fn != NULL);
+ cb_fn(cb_arg, 0);
+
+ g_lvol = NULL;
+ free(lvol->unique_id);
+ free(lvol);
+}
+
+void
+spdk_bdev_io_complete(struct spdk_bdev_io *bdev_io, enum spdk_bdev_io_status status)
+{
+}
+
+struct spdk_io_channel *spdk_lvol_get_io_channel(struct spdk_lvol *lvol)
+{
+ CU_ASSERT(lvol == g_lvol);
+ return g_ch;
+}
+
+void
+spdk_bdev_io_get_buf(struct spdk_bdev_io *bdev_io, spdk_bdev_io_get_buf_cb cb, uint64_t len)
+{
+ CU_ASSERT(cb == lvol_read);
+}
+
+void
+spdk_blob_io_read(struct spdk_blob *blob, struct spdk_io_channel *channel,
+ void *payload, uint64_t offset, uint64_t length,
+ spdk_blob_op_complete cb_fn, void *cb_arg)
+{
+}
+
+void
+spdk_blob_io_write(struct spdk_blob *blob, struct spdk_io_channel *channel,
+ void *payload, uint64_t offset, uint64_t length,
+ spdk_blob_op_complete cb_fn, void *cb_arg)
+{
+}
+
+void
+spdk_blob_io_unmap(struct spdk_blob *blob, struct spdk_io_channel *channel,
+ uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg)
+{
+ CU_ASSERT(blob == NULL);
+ CU_ASSERT(channel == g_ch);
+ CU_ASSERT(offset == g_io->u.bdev.offset_blocks);
+ CU_ASSERT(length == g_io->u.bdev.num_blocks);
+}
+
+void
+spdk_blob_io_write_zeroes(struct spdk_blob *blob, struct spdk_io_channel *channel,
+ uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg)
+{
+ CU_ASSERT(blob == NULL);
+ CU_ASSERT(channel == g_ch);
+ CU_ASSERT(offset == g_io->u.bdev.offset_blocks);
+ CU_ASSERT(length == g_io->u.bdev.num_blocks);
+}
+
+void
+spdk_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel,
+ struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
+ spdk_blob_op_complete cb_fn, void *cb_arg)
+{
+ CU_ASSERT(blob == NULL);
+ CU_ASSERT(channel == g_ch);
+ CU_ASSERT(offset == g_io->u.bdev.offset_blocks);
+ CU_ASSERT(length == g_io->u.bdev.num_blocks);
+}
+
+void
+spdk_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel,
+ struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
+ spdk_blob_op_complete cb_fn, void *cb_arg)
+{
+ CU_ASSERT(blob == NULL);
+ CU_ASSERT(channel == g_ch);
+ CU_ASSERT(offset == g_io->u.bdev.offset_blocks);
+ CU_ASSERT(length == g_io->u.bdev.num_blocks);
+}
+
+void
+spdk_bdev_module_list_add(struct spdk_bdev_module *bdev_module)
+{
+}
+
+int
+spdk_json_write_name(struct spdk_json_write_ctx *w, const char *name)
+{
+ return 0;
+}
+
+int
+spdk_json_write_array_begin(struct spdk_json_write_ctx *w)
+{
+ return 0;
+}
+
+int
+spdk_json_write_array_end(struct spdk_json_write_ctx *w)
+{
+ return 0;
+}
+
+int
+spdk_json_write_string(struct spdk_json_write_ctx *w, const char *val)
+{
+ return 0;
+}
+
+int
+spdk_json_write_bool(struct spdk_json_write_ctx *w, bool val)
+{
+ return 0;
+}
+
+int
+spdk_json_write_object_begin(struct spdk_json_write_ctx *w)
+{
+ return 0;
+}
+
+int
+spdk_json_write_object_end(struct spdk_json_write_ctx *w)
+{
+ return 0;
+}
+
+const char *
+spdk_bdev_get_name(const struct spdk_bdev *bdev)
+{
+ return "test";
+}
+
+int
+spdk_vbdev_register(struct spdk_bdev *vbdev, struct spdk_bdev **base_bdevs, int base_bdev_count)
+{
+ TAILQ_INIT(&vbdev->aliases);
+
+ g_registered_bdevs++;
+ return 0;
+}
+
+void
+spdk_bdev_module_examine_done(struct spdk_bdev_module *module)
+{
+ SPDK_CU_ASSERT_FATAL(g_examine_done != true);
+ g_examine_done = true;
+}
+
+static struct spdk_lvol *
+_lvol_create(struct spdk_lvol_store *lvs)
+{
+ struct spdk_lvol *lvol = calloc(1, sizeof(*lvol));
+
+ SPDK_CU_ASSERT_FATAL(lvol != NULL);
+
+ lvol->lvol_store = lvs;
+ lvol->ref_count++;
+ lvol->unique_id = spdk_sprintf_alloc("%s", "UNIT_TEST_UUID");
+ SPDK_CU_ASSERT_FATAL(lvol->unique_id != NULL);
+
+ TAILQ_INSERT_TAIL(&lvol->lvol_store->lvols, lvol, link);
+
+ return lvol;
+}
+
+int
+spdk_lvol_create(struct spdk_lvol_store *lvs, const char *name, size_t sz,
+ bool thin_provision, spdk_lvol_op_with_handle_complete cb_fn, void *cb_arg)
+{
+ struct spdk_lvol *lvol;
+
+ lvol = _lvol_create(lvs);
+ snprintf(lvol->name, sizeof(lvol->name), "%s", name);
+ cb_fn(cb_arg, lvol, 0);
+
+ return 0;
+}
+
+void
+spdk_lvol_create_snapshot(struct spdk_lvol *lvol, const char *snapshot_name,
+ spdk_lvol_op_with_handle_complete cb_fn, void *cb_arg)
+{
+ struct spdk_lvol *snap;
+
+ snap = _lvol_create(lvol->lvol_store);
+ snprintf(snap->name, sizeof(snap->name), "%s", snapshot_name);
+ cb_fn(cb_arg, snap, 0);
+}
+
+void
+spdk_lvol_create_clone(struct spdk_lvol *lvol, const char *clone_name,
+ spdk_lvol_op_with_handle_complete cb_fn, void *cb_arg)
+{
+ struct spdk_lvol *clone;
+
+ clone = _lvol_create(lvol->lvol_store);
+ snprintf(clone->name, sizeof(clone->name), "%s", clone_name);
+ cb_fn(cb_arg, clone, 0);
+}
+
+static void
+lvol_store_op_complete(void *cb_arg, int lvserrno)
+{
+ g_lvserrno = lvserrno;
+ return;
+}
+
+static void
+lvol_store_op_with_handle_complete(void *cb_arg, struct spdk_lvol_store *lvs, int lvserrno)
+{
+ g_lvserrno = lvserrno;
+ g_lvol_store = lvs;
+ return;
+}
+
+static void
+vbdev_lvol_create_complete(void *cb_arg, struct spdk_lvol *lvol, int lvolerrno)
+{
+ g_lvolerrno = lvolerrno;
+ g_lvol = lvol;
+}
+
+static void
+vbdev_lvol_resize_complete(void *cb_arg, int lvolerrno)
+{
+ g_lvolerrno = lvolerrno;
+}
+
+static void
+vbdev_lvol_rename_complete(void *cb_arg, int lvolerrno)
+{
+ g_lvolerrno = lvolerrno;
+}
+
+static void
+ut_lvs_destroy(void)
+{
+ int rc = 0;
+ int sz = 10;
+ struct spdk_lvol_store *lvs;
+
+ /* Lvol store is successfully created */
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+
+ lvs = g_lvol_store;
+ g_lvol_store = NULL;
+
+ spdk_uuid_generate(&lvs->uuid);
+
+ /* Successfully create lvol, which should be unloaded with lvs later */
+ g_lvolerrno = -1;
+ rc = vbdev_lvol_create(lvs, "lvol", sz, false, vbdev_lvol_create_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvolerrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ /* Unload lvol store */
+ vbdev_lvs_destruct(lvs, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+}
+
+static void
+ut_lvol_init(void)
+{
+ struct spdk_lvol_store *lvs;
+ int sz = 10;
+ int rc;
+
+ /* Lvol store is successfully created */
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+ lvs = g_lvol_store;
+
+ /* Successful lvol create */
+ g_lvolerrno = -1;
+ rc = vbdev_lvol_create(lvs, "lvol", sz, false, vbdev_lvol_create_complete, NULL);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(g_lvol != NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+
+ /* Successful lvol destroy */
+ vbdev_lvol_destroy(g_lvol, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvol == NULL);
+
+ /* Destroy lvol store */
+ vbdev_lvs_destruct(lvs, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+}
+
+static void
+ut_lvol_snapshot(void)
+{
+ struct spdk_lvol_store *lvs;
+ int sz = 10;
+ int rc;
+ struct spdk_lvol *lvol = NULL;
+
+ /* Lvol store is successfully created */
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+ lvs = g_lvol_store;
+
+ /* Successful lvol create */
+ g_lvolerrno = -1;
+ rc = vbdev_lvol_create(lvs, "lvol", sz, false, vbdev_lvol_create_complete, NULL);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+
+ lvol = g_lvol;
+
+ /* Successful snap create */
+ vbdev_lvol_create_snapshot(lvol, "snap", vbdev_lvol_create_complete, NULL);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(g_lvol != NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+
+ /* Successful lvol destroy */
+ vbdev_lvol_destroy(g_lvol, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvol == NULL);
+
+ /* Successful snap destroy */
+ g_lvol = lvol;
+ vbdev_lvol_destroy(g_lvol, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvol == NULL);
+
+ /* Destroy lvol store */
+ vbdev_lvs_destruct(lvs, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+}
+
+static void
+ut_lvol_clone(void)
+{
+ struct spdk_lvol_store *lvs;
+ int sz = 10;
+ int rc;
+ struct spdk_lvol *lvol = NULL;
+ struct spdk_lvol *snap = NULL;
+ struct spdk_lvol *clone = NULL;
+
+ /* Lvol store is successfully created */
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+ lvs = g_lvol_store;
+
+ /* Successful lvol create */
+ g_lvolerrno = -1;
+ rc = vbdev_lvol_create(lvs, "lvol", sz, false, vbdev_lvol_create_complete, NULL);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+
+ lvol = g_lvol;
+
+ /* Successful snap create */
+ vbdev_lvol_create_snapshot(lvol, "snap", vbdev_lvol_create_complete, NULL);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+
+ snap = g_lvol;
+
+ /* Successful clone create */
+ vbdev_lvol_create_clone(snap, "clone", vbdev_lvol_create_complete, NULL);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+
+ clone = g_lvol;
+
+ /* Successful lvol destroy */
+ g_lvol = lvol;
+ vbdev_lvol_destroy(g_lvol, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvol == NULL);
+
+ /* Successful clone destroy */
+ g_lvol = clone;
+ vbdev_lvol_destroy(g_lvol, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvol == NULL);
+
+ /* Successful lvol destroy */
+ g_lvol = snap;
+ vbdev_lvol_destroy(g_lvol, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvol == NULL);
+
+ /* Destroy lvol store */
+ vbdev_lvs_destruct(lvs, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+}
+
+static void
+ut_lvol_hotremove(void)
+{
+ int rc = 0;
+
+ lvol_store_initialize_fail = false;
+ lvol_store_initialize_cb_fail = false;
+ lvol_already_opened = false;
+
+ /* Lvol store is successfully created */
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+
+ /* Hot remove callback with NULL - stability check */
+ vbdev_lvs_hotremove_cb(NULL);
+
+ /* Hot remove lvs on bdev removal */
+ vbdev_lvs_hotremove_cb(&g_bdev);
+
+ CU_ASSERT(g_lvol_store == NULL);
+ CU_ASSERT(TAILQ_EMPTY(&g_spdk_lvol_pairs));
+
+}
+
+static void
+ut_lvs_examine_check(bool success)
+{
+ struct lvol_store_bdev *lvs_bdev;
+
+ /* Examine was finished regardless of result */
+ CU_ASSERT(g_examine_done == true);
+ g_examine_done = false;
+
+ if (success) {
+ SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&g_spdk_lvol_pairs));
+ lvs_bdev = TAILQ_FIRST(&g_spdk_lvol_pairs);
+ SPDK_CU_ASSERT_FATAL(lvs_bdev != NULL);
+ g_lvol_store = lvs_bdev->lvs;
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+ } else {
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&g_spdk_lvol_pairs));
+ g_lvol_store = NULL;
+ }
+}
+
+static void
+ut_lvol_examine(void)
+{
+ /* Examine unsuccessfully - bdev already opened */
+ g_lvserrno = -1;
+ lvol_already_opened = true;
+ vbdev_lvs_examine(&g_bdev);
+ ut_lvs_examine_check(false);
+
+ /* Examine unsuccessfully - fail on lvol store */
+ g_lvserrno = -1;
+ lvol_already_opened = false;
+ vbdev_lvs_examine(&g_bdev);
+ ut_lvs_examine_check(false);
+
+ /* Examine successfully
+ * - one lvol fails to load
+ * - lvs is loaded with no lvols present */
+ g_lvserrno = 0;
+ g_lvolerrno = -1;
+ g_num_lvols = 1;
+ lvol_already_opened = false;
+ g_registered_bdevs = 0;
+ vbdev_lvs_examine(&g_bdev);
+ ut_lvs_examine_check(true);
+ CU_ASSERT(g_registered_bdevs == 0);
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_store->lvols));
+ vbdev_lvs_destruct(g_lvol_store, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+
+ /* Examine successfully */
+ g_lvserrno = 0;
+ g_lvolerrno = 0;
+ g_registered_bdevs = 0;
+ lvol_already_opened = false;
+ vbdev_lvs_examine(&g_bdev);
+ ut_lvs_examine_check(true);
+ CU_ASSERT(g_registered_bdevs != 0);
+ SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&g_lvol_store->lvols));
+ vbdev_lvs_destruct(g_lvol_store, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+}
+
+static void
+ut_lvol_rename(void)
+{
+ struct spdk_lvol_store *lvs;
+ struct spdk_lvol *lvol;
+ struct spdk_lvol *lvol2;
+ int sz = 10;
+ int rc;
+
+ /* Lvol store is successfully created */
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+ lvs = g_lvol_store;
+
+ /* Successful lvols create */
+ g_lvolerrno = -1;
+ rc = vbdev_lvol_create(lvs, "lvol", sz, false, vbdev_lvol_create_complete, NULL);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(g_lvol != NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+ lvol = g_lvol;
+
+ g_lvolerrno = -1;
+ rc = vbdev_lvol_create(lvs, "lvol2", sz, false, vbdev_lvol_create_complete, NULL);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(g_lvol != NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+ lvol2 = g_lvol;
+
+ /* Successful rename lvol */
+ vbdev_lvol_rename(lvol, "new_lvol_name", vbdev_lvol_rename_complete, NULL);
+ SPDK_CU_ASSERT_FATAL(g_lvolerrno == 0);
+ CU_ASSERT_STRING_EQUAL(lvol->name, "new_lvol_name");
+
+ /* Renaming lvol with name already existing */
+ g_bdev_alias_already_exists = true;
+ vbdev_lvol_rename(lvol2, "new_lvol_name", vbdev_lvol_rename_complete, NULL);
+ g_bdev_alias_already_exists = false;
+ SPDK_CU_ASSERT_FATAL(g_lvolerrno != 0);
+ CU_ASSERT_STRING_NOT_EQUAL(lvol2->name, "new_lvol_name");
+
+ /* Renaming lvol with it's own name */
+ vbdev_lvol_rename(lvol, "new_lvol_name", vbdev_lvol_rename_complete, NULL);
+ SPDK_CU_ASSERT_FATAL(g_lvolerrno == 0);
+ CU_ASSERT_STRING_EQUAL(lvol->name, "new_lvol_name");
+
+ /* Successful lvols destroy */
+ vbdev_lvol_destroy(lvol, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvol == NULL);
+
+ vbdev_lvol_destroy(lvol2, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvol == NULL);
+
+ /* Destroy lvol store */
+ vbdev_lvs_destruct(lvs, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+}
+
+static void
+ut_lvol_destroy(void)
+{
+ struct spdk_lvol_store *lvs;
+ struct spdk_lvol *lvol;
+ struct spdk_lvol *lvol2;
+ int sz = 10;
+ int rc;
+
+ /* Lvol store is successfully created */
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+ lvs = g_lvol_store;
+
+ /* Successful lvols create */
+ g_lvolerrno = -1;
+ rc = vbdev_lvol_create(lvs, "lvol", sz, false, vbdev_lvol_create_complete, NULL);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(g_lvol != NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+ lvol = g_lvol;
+
+ g_lvolerrno = -1;
+ rc = vbdev_lvol_create(lvs, "lvol2", sz, false, vbdev_lvol_create_complete, NULL);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(g_lvol != NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+ lvol2 = g_lvol;
+
+ /* Unsuccessful lvols destroy */
+ g_lvol_deletable = false;
+ vbdev_lvol_destroy(lvol, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvol != NULL);
+ CU_ASSERT(g_lvserrno == -EPERM);
+
+ g_lvol_deletable = true;
+ /* Successful lvols destroy */
+ vbdev_lvol_destroy(lvol, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvol == NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+
+ /* Hot remove lvol bdev */
+ vbdev_lvol_unregister(lvol2);
+
+ /* Unload lvol store */
+ vbdev_lvs_unload(lvs, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+}
+
+static void
+ut_lvol_resize(void)
+{
+ struct spdk_lvol_store *lvs;
+ struct spdk_lvol *lvol;
+ int sz = 10;
+ int rc = 0;
+
+ /* Lvol store is successfully created */
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+ lvs = g_lvol_store;
+
+ /* Successful lvol create */
+ g_lvolerrno = -1;
+ rc = vbdev_lvol_create(lvs, "lvol", sz, false, vbdev_lvol_create_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvolerrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ lvol = g_lvol;
+
+ /* Successful lvol resize */
+ g_lvolerrno = -1;
+ vbdev_lvol_resize(lvol, 20, vbdev_lvol_resize_complete, NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+ CU_ASSERT(lvol->bdev->blockcnt == 20 * g_cluster_size / lvol->bdev->blocklen);
+
+ /* Resize with NULL lvol */
+ vbdev_lvol_resize(NULL, 20, vbdev_lvol_resize_complete, NULL);
+ CU_ASSERT(g_lvolerrno != 0);
+
+ /* Successful lvol destroy */
+ vbdev_lvol_destroy(lvol, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvol == NULL);
+
+ /* Destroy lvol store */
+ vbdev_lvs_destruct(lvs, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+}
+
+static void
+ut_lvs_unload(void)
+{
+ int rc = 0;
+ int sz = 10;
+ struct spdk_lvol_store *lvs;
+
+ /* Lvol store is successfully created */
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+
+ lvs = g_lvol_store;
+ g_lvol_store = NULL;
+
+ spdk_uuid_generate(&lvs->uuid);
+
+ /* Successfully create lvol, which should be destroyed with lvs later */
+ g_lvolerrno = -1;
+ rc = vbdev_lvol_create(lvs, "lvol", sz, false, vbdev_lvol_create_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvolerrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ /* Unload lvol store */
+ vbdev_lvs_unload(lvs, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+ CU_ASSERT(g_lvol != NULL);
+}
+
+static void
+ut_lvs_init(void)
+{
+ int rc = 0;
+ struct spdk_lvol_store *lvs;
+
+ /* spdk_lvs_init() fails */
+ lvol_store_initialize_fail = true;
+
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc != 0);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+
+ lvol_store_initialize_fail = false;
+
+ /* spdk_lvs_init_cb() fails */
+ lvol_store_initialize_cb_fail = true;
+
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno != 0);
+ CU_ASSERT(g_lvol_store == NULL);
+
+ lvol_store_initialize_cb_fail = false;
+
+ /* Lvol store is successfully created */
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+
+ lvs = g_lvol_store;
+ g_lvol_store = NULL;
+
+ /* Bdev with lvol store already claimed */
+ rc = vbdev_lvs_create(&g_bdev, "lvs", 0, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc != 0);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+
+ /* Destruct lvol store */
+ vbdev_lvs_destruct(lvs, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+}
+
+static void
+ut_vbdev_lvol_get_io_channel(void)
+{
+ struct spdk_io_channel *ch;
+
+ g_lvol = calloc(1, sizeof(struct spdk_lvol));
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ ch = vbdev_lvol_get_io_channel(g_lvol);
+ CU_ASSERT(ch == g_ch);
+
+ free(g_lvol);
+}
+
+static void
+ut_vbdev_lvol_io_type_supported(void)
+{
+ struct spdk_lvol *lvol;
+ bool ret;
+
+ lvol = calloc(1, sizeof(struct spdk_lvol));
+ SPDK_CU_ASSERT_FATAL(lvol != NULL);
+
+ g_blob_is_read_only = false;
+
+ /* Supported types */
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_READ);
+ CU_ASSERT(ret == true);
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_WRITE);
+ CU_ASSERT(ret == true);
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_RESET);
+ CU_ASSERT(ret == true);
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_UNMAP);
+ CU_ASSERT(ret == true);
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_WRITE_ZEROES);
+ CU_ASSERT(ret == true);
+
+ /* Unsupported types */
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_FLUSH);
+ CU_ASSERT(ret == false);
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_NVME_ADMIN);
+ CU_ASSERT(ret == false);
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_NVME_IO);
+ CU_ASSERT(ret == false);
+
+ g_blob_is_read_only = true;
+
+ /* Supported types */
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_READ);
+ CU_ASSERT(ret == true);
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_RESET);
+ CU_ASSERT(ret == true);
+
+ /* Unsupported types */
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_WRITE);
+ CU_ASSERT(ret == false);
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_UNMAP);
+ CU_ASSERT(ret == false);
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_WRITE_ZEROES);
+ CU_ASSERT(ret == false);
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_FLUSH);
+ CU_ASSERT(ret == false);
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_NVME_ADMIN);
+ CU_ASSERT(ret == false);
+ ret = vbdev_lvol_io_type_supported(lvol, SPDK_BDEV_IO_TYPE_NVME_IO);
+ CU_ASSERT(ret == false);
+
+ free(lvol);
+}
+
+static void
+ut_lvol_read_write(void)
+{
+ g_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct lvol_task));
+ SPDK_CU_ASSERT_FATAL(g_io != NULL);
+ g_base_bdev = calloc(1, sizeof(struct spdk_bdev));
+ SPDK_CU_ASSERT_FATAL(g_base_bdev != NULL);
+ g_lvol = calloc(1, sizeof(struct spdk_lvol));
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ g_task = (struct lvol_task *)g_io->driver_ctx;
+ g_io->bdev = g_base_bdev;
+ g_io->bdev->ctxt = g_lvol;
+ g_io->u.bdev.offset_blocks = 20;
+ g_io->u.bdev.num_blocks = 20;
+
+ lvol_read(g_ch, g_io);
+ CU_ASSERT(g_task->status == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ lvol_write(g_lvol, g_ch, g_io);
+ CU_ASSERT(g_task->status == SPDK_BDEV_IO_STATUS_SUCCESS);
+
+ free(g_io);
+ free(g_base_bdev);
+ free(g_lvol);
+}
+
+static void
+ut_vbdev_lvol_submit_request(void)
+{
+ struct spdk_lvol request_lvol = {};
+ g_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct lvol_task));
+ SPDK_CU_ASSERT_FATAL(g_io != NULL);
+ g_base_bdev = calloc(1, sizeof(struct spdk_bdev));
+ SPDK_CU_ASSERT_FATAL(g_base_bdev != NULL);
+ g_task = (struct lvol_task *)g_io->driver_ctx;
+ g_io->bdev = g_base_bdev;
+
+ g_io->type = SPDK_BDEV_IO_TYPE_READ;
+ g_base_bdev->ctxt = &request_lvol;
+ vbdev_lvol_submit_request(g_ch, g_io);
+
+ free(g_io);
+ free(g_base_bdev);
+}
+
+static void
+ut_lvs_rename(void)
+{
+ int rc = 0;
+ int sz = 10;
+ struct spdk_lvol_store *lvs;
+
+ /* Lvol store is successfully created */
+ rc = vbdev_lvs_create(&g_bdev, "old_lvs_name", 0, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(g_lvol_store->bs_dev != NULL);
+
+ lvs = g_lvol_store;
+ g_lvol_store = NULL;
+
+ g_base_bdev = calloc(1, sizeof(*g_base_bdev));
+ SPDK_CU_ASSERT_FATAL(g_base_bdev != NULL);
+
+ /* Successfully create lvol, which should be destroyed with lvs later */
+ g_lvolerrno = -1;
+ rc = vbdev_lvol_create(lvs, "lvol", sz, false, vbdev_lvol_create_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvolerrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ /* Trying to rename lvs with lvols created */
+ vbdev_lvs_rename(lvs, "new_lvs_name", lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT_STRING_EQUAL(lvs->name, "new_lvs_name");
+ CU_ASSERT_STRING_EQUAL(TAILQ_FIRST(&g_lvol->bdev->aliases)->alias, "new_lvs_name/lvol");
+
+ /* Trying to rename lvs with name already used by another lvs */
+ /* This is a bdev_lvol test, so g_lvs_with_name_already_exists simulates
+ * existing lvs with name 'another_new_lvs_name' and this name in fact is not compared */
+ g_lvs_with_name_already_exists = true;
+ vbdev_lvs_rename(lvs, "another_new_lvs_name", lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == -EEXIST);
+ CU_ASSERT_STRING_EQUAL(lvs->name, "new_lvs_name");
+ CU_ASSERT_STRING_EQUAL(TAILQ_FIRST(&g_lvol->bdev->aliases)->alias, "new_lvs_name/lvol");
+ g_lvs_with_name_already_exists = false;
+
+ /* Unload lvol store */
+ g_lvol_store = lvs;
+ vbdev_lvs_destruct(g_lvol_store, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store == NULL);
+
+ free(g_base_bdev->name);
+ free(g_base_bdev);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("lvol", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "ut_lvs_init", ut_lvs_init) == NULL ||
+ CU_add_test(suite, "ut_lvol_init", ut_lvol_init) == NULL ||
+ CU_add_test(suite, "ut_lvol_snapshot", ut_lvol_snapshot) == NULL ||
+ CU_add_test(suite, "ut_lvol_clone", ut_lvol_clone) == NULL ||
+ CU_add_test(suite, "ut_lvs_destroy", ut_lvs_destroy) == NULL ||
+ CU_add_test(suite, "ut_lvs_unload", ut_lvs_unload) == NULL ||
+ CU_add_test(suite, "ut_lvol_resize", ut_lvol_resize) == NULL ||
+ CU_add_test(suite, "lvol_hotremove", ut_lvol_hotremove) == NULL ||
+ CU_add_test(suite, "ut_vbdev_lvol_get_io_channel", ut_vbdev_lvol_get_io_channel) == NULL ||
+ CU_add_test(suite, "ut_vbdev_lvol_io_type_supported", ut_vbdev_lvol_io_type_supported) == NULL ||
+ CU_add_test(suite, "ut_lvol_read_write", ut_lvol_read_write) == NULL ||
+ CU_add_test(suite, "ut_vbdev_lvol_submit_request", ut_vbdev_lvol_submit_request) == NULL ||
+ CU_add_test(suite, "lvol_examine", ut_lvol_examine) == NULL ||
+ CU_add_test(suite, "ut_lvol_rename", ut_lvol_rename) == NULL ||
+ CU_add_test(suite, "ut_lvol_destroy", ut_lvol_destroy) == NULL ||
+ CU_add_test(suite, "ut_lvs_rename", ut_lvs_rename) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/blob/Makefile b/src/spdk/test/unit/lib/blob/Makefile
new file mode 100644
index 00000000..c57d0b1c
--- /dev/null
+++ b/src/spdk/test/unit/lib/blob/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = blob.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/blob/blob.c/.gitignore b/src/spdk/test/unit/lib/blob/blob.c/.gitignore
new file mode 100644
index 00000000..553f5465
--- /dev/null
+++ b/src/spdk/test/unit/lib/blob/blob.c/.gitignore
@@ -0,0 +1 @@
+blob_ut
diff --git a/src/spdk/test/unit/lib/blob/blob.c/Makefile b/src/spdk/test/unit/lib/blob/blob.c/Makefile
new file mode 100644
index 00000000..6e279ff0
--- /dev/null
+++ b/src/spdk/test/unit/lib/blob/blob.c/Makefile
@@ -0,0 +1,41 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.mock.unittest.mk
+
+TEST_FILE = blob_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/blob/blob.c/blob_ut.c b/src/spdk/test/unit/lib/blob/blob.c/blob_ut.c
new file mode 100644
index 00000000..88f438eb
--- /dev/null
+++ b/src/spdk/test/unit/lib/blob/blob.c/blob_ut.c
@@ -0,0 +1,5914 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+#include "spdk/blob.h"
+#include "spdk/string.h"
+
+#include "common/lib/test_env.c"
+#include "../bs_dev_common.c"
+#include "blob/blobstore.c"
+#include "blob/request.c"
+#include "blob/zeroes.c"
+#include "blob/blob_bs_dev.c"
+
+struct spdk_blob_store *g_bs;
+spdk_blob_id g_blobid;
+struct spdk_blob *g_blob;
+int g_bserrno;
+struct spdk_xattr_names *g_names;
+int g_done;
+char *g_xattr_names[] = {"first", "second", "third"};
+char *g_xattr_values[] = {"one", "two", "three"};
+uint64_t g_ctx = 1729;
+
+struct spdk_bs_super_block_ver1 {
+ uint8_t signature[8];
+ uint32_t version;
+ uint32_t length;
+ uint32_t clean; /* If there was a clean shutdown, this is 1. */
+ spdk_blob_id super_blob;
+
+ uint32_t cluster_size; /* In bytes */
+
+ uint32_t used_page_mask_start; /* Offset from beginning of disk, in pages */
+ uint32_t used_page_mask_len; /* Count, in pages */
+
+ uint32_t used_cluster_mask_start; /* Offset from beginning of disk, in pages */
+ uint32_t used_cluster_mask_len; /* Count, in pages */
+
+ uint32_t md_start; /* Offset from beginning of disk, in pages */
+ uint32_t md_len; /* Count, in pages */
+
+ uint8_t reserved[4036];
+ uint32_t crc;
+} __attribute__((packed));
+SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_super_block_ver1) == 0x1000, "Invalid super block size");
+
+
+static void
+_get_xattr_value(void *arg, const char *name,
+ const void **value, size_t *value_len)
+{
+ uint64_t i;
+
+ SPDK_CU_ASSERT_FATAL(value_len != NULL);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(arg == &g_ctx)
+
+ for (i = 0; i < sizeof(g_xattr_names); i++) {
+ if (!strcmp(name, g_xattr_names[i])) {
+ *value_len = strlen(g_xattr_values[i]);
+ *value = g_xattr_values[i];
+ break;
+ }
+ }
+}
+
+static void
+_get_xattr_value_null(void *arg, const char *name,
+ const void **value, size_t *value_len)
+{
+ SPDK_CU_ASSERT_FATAL(value_len != NULL);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(arg == NULL)
+
+ *value_len = 0;
+ *value = NULL;
+}
+
+
+
+static void
+bs_op_complete(void *cb_arg, int bserrno)
+{
+ g_bserrno = bserrno;
+}
+
+static void
+bs_op_with_handle_complete(void *cb_arg, struct spdk_blob_store *bs,
+ int bserrno)
+{
+ g_bs = bs;
+ g_bserrno = bserrno;
+}
+
+static void
+blob_op_complete(void *cb_arg, int bserrno)
+{
+ g_bserrno = bserrno;
+}
+
+static void
+blob_op_with_id_complete(void *cb_arg, spdk_blob_id blobid, int bserrno)
+{
+ g_blobid = blobid;
+ g_bserrno = bserrno;
+}
+
+static void
+blob_op_with_handle_complete(void *cb_arg, struct spdk_blob *blb, int bserrno)
+{
+ g_blob = blb;
+ g_bserrno = bserrno;
+}
+
+static void
+blob_init(void)
+{
+ struct spdk_bs_dev *dev;
+
+ dev = init_dev();
+
+ /* should fail for an unsupported blocklen */
+ dev->blocklen = 500;
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == -EINVAL);
+
+ dev = init_dev();
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+}
+
+static void
+blob_super(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ spdk_blob_id blobid;
+
+ dev = init_dev();
+
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ /* Get the super blob without having set one */
+ spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == -ENOENT);
+ CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
+
+ /* Create a blob */
+ spdk_bs_create_blob(bs, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ /* Set the blob as the super blob */
+ spdk_bs_set_super(bs, blobid, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Get the super blob */
+ spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(blobid == g_blobid);
+
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+}
+
+static void
+blob_open(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_blob *blob;
+ spdk_blob_id blobid, blobid2;
+
+ dev = init_dev();
+
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ spdk_bs_create_blob(bs, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob = g_blob;
+
+ blobid2 = spdk_blob_get_id(blob);
+ CU_ASSERT(blobid == blobid2);
+
+ /* Try to open file again. It should return success. */
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(blob == g_blob);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /*
+ * Close the file a second time, releasing the second reference. This
+ * should succeed.
+ */
+ blob = g_blob;
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /*
+ * Try to open file again. It should succeed. This tests the case
+ * where the file is opened, closed, then re-opened again.
+ */
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob = g_blob;
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+}
+
+static void
+blob_create(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_blob *blob;
+ struct spdk_blob_opts opts;
+ spdk_blob_id blobid;
+
+ dev = init_dev();
+
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ /* Create blob with 10 clusters */
+
+ spdk_blob_opts_init(&opts);
+ opts.num_clusters = 10;
+
+ spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10)
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Create blob with 0 clusters */
+
+ spdk_blob_opts_init(&opts);
+ opts.num_clusters = 0;
+
+ spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0)
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Create blob with default options (opts == NULL) */
+
+ spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0)
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Try to create blob with size larger than blobstore */
+
+ spdk_blob_opts_init(&opts);
+ opts.num_clusters = bs->total_clusters + 1;
+
+ spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == -ENOSPC);
+
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+
+}
+
+static void
+blob_create_internal(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_blob *blob;
+ struct spdk_blob_opts opts;
+ struct spdk_blob_xattr_opts internal_xattrs;
+ const void *value;
+ size_t value_len;
+ spdk_blob_id blobid;
+ int rc;
+
+ dev = init_dev();
+
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ /* Create blob with custom xattrs */
+
+ spdk_blob_opts_init(&opts);
+ _spdk_blob_xattrs_init(&internal_xattrs);
+ internal_xattrs.count = 3;
+ internal_xattrs.names = g_xattr_names;
+ internal_xattrs.get_value = _get_xattr_value;
+ internal_xattrs.ctx = &g_ctx;
+
+ _spdk_bs_create_blob(bs, &opts, &internal_xattrs, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ rc = _spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len, true);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(value_len == strlen(g_xattr_values[0]));
+ CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
+
+ rc = _spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len, true);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(value_len == strlen(g_xattr_values[1]));
+ CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
+
+ rc = _spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len, true);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(value_len == strlen(g_xattr_values[2]));
+ CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
+
+ rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
+ CU_ASSERT(rc != 0);
+
+ rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
+ CU_ASSERT(rc != 0);
+
+ rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
+ CU_ASSERT(rc != 0);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Create blob with NULL internal options */
+
+ _spdk_bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ CU_ASSERT(TAILQ_FIRST(&g_blob->xattrs_internal) == NULL);
+
+ blob = g_blob;
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+
+}
+
+static void
+blob_thin_provision(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_blob *blob;
+ struct spdk_blob_opts opts;
+ struct spdk_bs_opts bs_opts;
+ spdk_blob_id blobid;
+
+ dev = init_dev();
+ spdk_bs_opts_init(&bs_opts);
+ snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
+
+ /* Initialize a new blob store */
+ spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+
+ bs = g_bs;
+
+ /* Create blob with thin provisioning enabled */
+
+ spdk_blob_opts_init(&opts);
+ opts.thin_provision = true;
+ opts.num_clusters = 10;
+
+ spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+ CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Do not shut down cleanly. This makes sure that when we load again
+ * and try to recover a valid used_cluster map, that blobstore will
+ * ignore clusters with index 0 since these are unallocated clusters.
+ */
+
+ /* Load an existing blob store and check if invalid_flags is set */
+ dev = init_dev();
+ snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
+ spdk_bs_load(dev, &bs_opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+
+ bs = g_bs;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+ CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+}
+
+static void
+blob_snapshot(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_blob *blob;
+ struct spdk_blob *snapshot, *snapshot2;
+ struct spdk_blob_bs_dev *blob_bs_dev;
+ struct spdk_blob_opts opts;
+ struct spdk_blob_xattr_opts xattrs;
+ spdk_blob_id blobid;
+ spdk_blob_id snapshotid;
+ const void *value;
+ size_t value_len;
+ int rc;
+
+ dev = init_dev();
+
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ /* Create blob with 10 clusters */
+ spdk_blob_opts_init(&opts);
+ opts.num_clusters = 10;
+
+ spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10)
+
+ /* Create snapshot from blob */
+ spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ snapshotid = g_blobid;
+
+ spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot = g_blob;
+ CU_ASSERT(snapshot->data_ro == true)
+ CU_ASSERT(snapshot->md_ro == true)
+ CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10)
+
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10)
+ CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
+ CU_ASSERT(spdk_mem_all_zero(blob->active.clusters,
+ blob->active.num_clusters * sizeof(blob->active.clusters[0])));
+
+ /* Try to create snapshot from clone with xattrs */
+ xattrs.names = g_xattr_names;
+ xattrs.get_value = _get_xattr_value;
+ xattrs.count = 3;
+ xattrs.ctx = &g_ctx;
+ spdk_bs_create_snapshot(bs, blobid, &xattrs, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot2 = g_blob;
+ CU_ASSERT(snapshot2->data_ro == true)
+ CU_ASSERT(snapshot2->md_ro == true)
+ CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 10)
+
+ /* Confirm that blob is backed by snapshot2 and snapshot2 is backed by snapshot */
+ CU_ASSERT(snapshot->back_bs_dev == NULL);
+ SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL);
+ SPDK_CU_ASSERT_FATAL(snapshot2->back_bs_dev != NULL);
+
+ blob_bs_dev = (struct spdk_blob_bs_dev *)blob->back_bs_dev;
+ CU_ASSERT(blob_bs_dev->blob == snapshot2);
+
+ blob_bs_dev = (struct spdk_blob_bs_dev *)snapshot2->back_bs_dev;
+ CU_ASSERT(blob_bs_dev->blob == snapshot);
+
+ rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[0], &value, &value_len);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(value_len == strlen(g_xattr_values[0]));
+ CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
+
+ rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[1], &value, &value_len);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(value_len == strlen(g_xattr_values[1]));
+ CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
+
+ rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[2], &value, &value_len);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(value_len == strlen(g_xattr_values[2]));
+ CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
+
+ /* Try to create snapshot from snapshot */
+ spdk_bs_create_snapshot(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == -EINVAL);
+ CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_blob_close(snapshot, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_blob_close(snapshot2, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+}
+
+static void
+blob_snapshot_freeze_io(void)
+{
+ struct spdk_io_channel *channel;
+ struct spdk_bs_channel *bs_channel;
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_blob *blob;
+ struct spdk_blob_opts opts;
+ spdk_blob_id blobid;
+ uint32_t num_of_pages = 10;
+ uint8_t payload_read[num_of_pages * SPDK_BS_PAGE_SIZE];
+ uint8_t payload_write[num_of_pages * SPDK_BS_PAGE_SIZE];
+ uint8_t payload_zero[num_of_pages * SPDK_BS_PAGE_SIZE];
+
+ memset(payload_write, 0xE5, sizeof(payload_write));
+ memset(payload_read, 0x00, sizeof(payload_read));
+ memset(payload_zero, 0x00, sizeof(payload_zero));
+
+ dev = init_dev();
+ memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
+
+ /* Test freeze I/O during snapshot */
+
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ bs_channel = spdk_io_channel_get_ctx(channel);
+
+ /* Create blob with 10 clusters */
+ spdk_blob_opts_init(&opts);
+ opts.num_clusters = 10;
+ opts.thin_provision = false;
+
+ spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
+
+ /* Enable explicitly calling callbacks. On each read/write to back device
+ * execution will stop and wait until _bs_flush_scheduler is called */
+ g_scheduler_delay = true;
+
+ spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+
+ /* This is implementation specific.
+ * Flag 'frozen_io' is set in _spdk_bs_snapshot_freeze_cpl callback.
+ * Four async I/O operations happen before that. */
+
+ _bs_flush_scheduler(4);
+
+ CU_ASSERT(TAILQ_EMPTY(&bs_channel->queued_io));
+
+ /* Blob I/O should be frozen here */
+ CU_ASSERT(blob->frozen_refcnt == 1);
+
+ /* Write to the blob */
+ spdk_blob_io_write(blob, channel, payload_write, 0, num_of_pages, blob_op_complete, NULL);
+
+ /* Verify that I/O is queued */
+ CU_ASSERT(!TAILQ_EMPTY(&bs_channel->queued_io));
+ /* Verify that payload is not written to disk */
+ CU_ASSERT(memcmp(payload_zero, &g_dev_buffer[blob->active.clusters[0]*SPDK_BS_PAGE_SIZE],
+ SPDK_BS_PAGE_SIZE) == 0);
+
+ /* Disable scheduler delay.
+ * Finish all operations including spdk_bs_create_snapshot */
+ g_scheduler_delay = false;
+ _bs_flush_scheduler(1);
+
+ /* Verify snapshot */
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+
+ /* Verify that blob has unset frozen_io */
+ CU_ASSERT(blob->frozen_refcnt == 0);
+
+ /* Verify that postponed I/O completed successfully by comparing payload */
+ spdk_blob_io_read(blob, channel, payload_read, 0, num_of_pages, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_write, payload_read, num_of_pages * SPDK_BS_PAGE_SIZE) == 0);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_free_io_channel(channel);
+
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+}
+
+static void
+blob_clone(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_blob_opts opts;
+ struct spdk_blob *blob, *snapshot, *clone;
+ spdk_blob_id blobid, cloneid, snapshotid;
+ struct spdk_blob_xattr_opts xattrs;
+ const void *value;
+ size_t value_len;
+ int rc;
+
+ dev = init_dev();
+
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ /* Create blob with 10 clusters */
+
+ spdk_blob_opts_init(&opts);
+ opts.num_clusters = 10;
+
+ spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10)
+
+ /* Create snapshot */
+ spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ snapshotid = g_blobid;
+
+ spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot = g_blob;
+ CU_ASSERT(snapshot->data_ro == true)
+ CU_ASSERT(snapshot->md_ro == true)
+ CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
+
+ spdk_blob_close(snapshot, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Create clone from snapshot with xattrs */
+ xattrs.names = g_xattr_names;
+ xattrs.get_value = _get_xattr_value;
+ xattrs.count = 3;
+ xattrs.ctx = &g_ctx;
+
+ spdk_bs_create_clone(bs, snapshotid, &xattrs, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ cloneid = g_blobid;
+
+ spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ clone = g_blob;
+ CU_ASSERT(clone->data_ro == false)
+ CU_ASSERT(clone->md_ro == false)
+ CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);
+
+ rc = spdk_blob_get_xattr_value(clone, g_xattr_names[0], &value, &value_len);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(value_len == strlen(g_xattr_values[0]));
+ CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
+
+ rc = spdk_blob_get_xattr_value(clone, g_xattr_names[1], &value, &value_len);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(value_len == strlen(g_xattr_values[1]));
+ CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
+
+ rc = spdk_blob_get_xattr_value(clone, g_xattr_names[2], &value, &value_len);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(value_len == strlen(g_xattr_values[2]));
+ CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
+
+
+ spdk_blob_close(clone, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Try to create clone from not read only blob */
+ spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == -EINVAL);
+ CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
+
+ /* Mark blob as read only */
+ spdk_blob_set_read_only(blob);
+ spdk_blob_sync_md(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Create clone from read only blob */
+ spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ cloneid = g_blobid;
+
+ spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ clone = g_blob;
+ CU_ASSERT(clone->data_ro == false)
+ CU_ASSERT(clone->md_ro == false)
+ CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);
+
+ spdk_blob_close(clone, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+
+}
+
+static void
+_blob_inflate(bool decouple_parent)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_blob_opts opts;
+ struct spdk_blob *blob, *snapshot;
+ spdk_blob_id blobid, snapshotid;
+ struct spdk_io_channel *channel;
+ uint64_t free_clusters;
+
+ dev = init_dev();
+
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ SPDK_CU_ASSERT_FATAL(channel != NULL);
+
+ /* Create blob with 10 clusters */
+
+ spdk_blob_opts_init(&opts);
+ opts.num_clusters = 10;
+ opts.thin_provision = true;
+
+ spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10)
+ CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
+
+ /* 1) Blob with no parent */
+ if (decouple_parent) {
+ /* Decouple parent of blob with no parent (should fail) */
+ spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno != 0);
+ } else {
+ /* Inflate of thin blob with no parent should made it thick */
+ spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == false);
+ }
+
+ spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ snapshotid = g_blobid;
+
+ CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10)
+
+ spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot = g_blob;
+ CU_ASSERT(snapshot->data_ro == true)
+ CU_ASSERT(snapshot->md_ro == true)
+ CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
+
+ spdk_blob_close(snapshot, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ free_clusters = spdk_bs_free_cluster_count(bs);
+
+ /* 2) Blob with parent */
+ if (!decouple_parent) {
+ /* Do full blob inflation */
+ spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ /* all 10 clusters should be allocated */
+ CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 10);
+ } else {
+ /* Decouple parent of blob */
+ spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ /* when only parent is removed, none of the clusters should be allocated */
+ CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters);
+ }
+
+ /* Now, it should be possible to delete snapshot */
+ spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10)
+ CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == decouple_parent);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+
+ spdk_bs_free_io_channel(channel);
+}
+
+static void
+blob_inflate(void)
+{
+ _blob_inflate(false);
+ _blob_inflate(true);
+}
+
+static void
+blob_delete(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ spdk_blob_id blobid;
+
+ dev = init_dev();
+
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ /* Create a blob and then delete it. */
+ spdk_bs_create_blob(bs, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid > 0);
+ blobid = g_blobid;
+
+ spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Try to open the blob */
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == -ENOENT);
+
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+}
+
+static void
+blob_resize(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_blob *blob;
+ spdk_blob_id blobid;
+ uint64_t free_clusters;
+
+ dev = init_dev();
+
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+ free_clusters = spdk_bs_free_cluster_count(bs);
+
+ spdk_bs_create_blob(bs, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ /* Confirm that resize fails if blob is marked read-only. */
+ blob->md_ro = true;
+ spdk_blob_resize(blob, 5, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == -EPERM);
+ blob->md_ro = false;
+
+ /* The blob started at 0 clusters. Resize it to be 5. */
+ spdk_blob_resize(blob, 5, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));
+
+ /* Shrink the blob to 3 clusters. This will not actually release
+ * the old clusters until the blob is synced.
+ */
+ spdk_blob_resize(blob, 3, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ /* Verify there are still 5 clusters in use */
+ CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));
+
+ spdk_blob_sync_md(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ /* Now there are only 3 clusters in use */
+ CU_ASSERT((free_clusters - 3) == spdk_bs_free_cluster_count(bs));
+
+ /* Resize the blob to be 10 clusters. Growth takes effect immediately. */
+ spdk_blob_resize(blob, 10, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT((free_clusters - 10) == spdk_bs_free_cluster_count(bs));
+
+ /* Try to resize the blob to size larger than blobstore. */
+ spdk_blob_resize(blob, bs->total_clusters + 1, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == -ENOSPC);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+}
+
+static void
+blob_read_only(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_blob *blob;
+ struct spdk_bs_opts opts;
+ spdk_blob_id blobid;
+ int rc;
+
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
+
+ spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ spdk_bs_create_blob(bs, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ rc = spdk_blob_set_read_only(blob);
+ CU_ASSERT(rc == 0);
+
+ CU_ASSERT(blob->data_ro == false);
+ CU_ASSERT(blob->md_ro == false);
+
+ spdk_blob_sync_md(blob, bs_op_complete, NULL);
+
+ CU_ASSERT(blob->data_ro == true);
+ CU_ASSERT(blob->md_ro == true);
+ CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ CU_ASSERT(blob->data_ro == true);
+ CU_ASSERT(blob->md_ro == true);
+ CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+ g_blob = NULL;
+ g_blobid = 0;
+
+ /* Load an existing blob store */
+ dev = init_dev();
+ snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+
+ spdk_bs_open_blob(g_bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ CU_ASSERT(blob->data_ro == true);
+ CU_ASSERT(blob->md_ro == true);
+ CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+}
+
+static void
+channel_ops(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_io_channel *channel;
+
+ dev = init_dev();
+
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ CU_ASSERT(channel != NULL);
+
+ spdk_bs_free_io_channel(channel);
+
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+}
+
+static void
+blob_write(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_blob *blob;
+ struct spdk_io_channel *channel;
+ spdk_blob_id blobid;
+ uint64_t pages_per_cluster;
+ uint8_t payload[10 * 4096];
+
+ dev = init_dev();
+
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ CU_ASSERT(channel != NULL);
+
+ spdk_bs_create_blob(bs, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ /* Write to a blob with 0 size */
+ spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == -EINVAL);
+
+ /* Resize the blob */
+ spdk_blob_resize(blob, 5, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Confirm that write fails if blob is marked read-only. */
+ blob->data_ro = true;
+ spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == -EPERM);
+ blob->data_ro = false;
+
+ /* Write to the blob */
+ spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Write starting beyond the end */
+ spdk_blob_io_write(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
+ NULL);
+ CU_ASSERT(g_bserrno == -EINVAL);
+
+ /* Write starting at a valid location but going off the end */
+ spdk_blob_io_write(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
+ blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == -EINVAL);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_free_io_channel(channel);
+
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+}
+
+static void
+blob_read(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_blob *blob;
+ struct spdk_io_channel *channel;
+ spdk_blob_id blobid;
+ uint64_t pages_per_cluster;
+ uint8_t payload[10 * 4096];
+
+ dev = init_dev();
+
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ CU_ASSERT(channel != NULL);
+
+ spdk_bs_create_blob(bs, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ /* Read from a blob with 0 size */
+ spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == -EINVAL);
+
+ /* Resize the blob */
+ spdk_blob_resize(blob, 5, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Confirm that read passes if blob is marked read-only. */
+ blob->data_ro = true;
+ spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ blob->data_ro = false;
+
+ /* Read from the blob */
+ spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Read starting beyond the end */
+ spdk_blob_io_read(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
+ NULL);
+ CU_ASSERT(g_bserrno == -EINVAL);
+
+ /* Read starting at a valid location but going off the end */
+ spdk_blob_io_read(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
+ blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == -EINVAL);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_free_io_channel(channel);
+
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+}
+
+static void
+blob_rw_verify(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_blob *blob;
+ struct spdk_io_channel *channel;
+ spdk_blob_id blobid;
+ uint8_t payload_read[10 * 4096];
+ uint8_t payload_write[10 * 4096];
+
+ dev = init_dev();
+
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ CU_ASSERT(channel != NULL);
+
+ spdk_bs_create_blob(bs, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob = g_blob;
+
+ spdk_blob_resize(blob, 32, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ memset(payload_write, 0xE5, sizeof(payload_write));
+ spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ memset(payload_read, 0x00, sizeof(payload_read));
+ spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_write, payload_read, 4 * 4096) == 0);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_free_io_channel(channel);
+
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+}
+
+static void
+blob_rw_verify_iov(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_blob *blob;
+ struct spdk_io_channel *channel;
+ spdk_blob_id blobid;
+ uint8_t payload_read[10 * 4096];
+ uint8_t payload_write[10 * 4096];
+ struct iovec iov_read[3];
+ struct iovec iov_write[3];
+ void *buf;
+
+ dev = init_dev();
+ memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
+
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ CU_ASSERT(channel != NULL);
+
+ spdk_bs_create_blob(bs, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ spdk_blob_resize(blob, 2, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /*
+ * Manually adjust the offset of the blob's second cluster. This allows
+ * us to make sure that the readv/write code correctly accounts for I/O
+ * that cross cluster boundaries. Start by asserting that the allocated
+ * clusters are where we expect before modifying the second cluster.
+ */
+ CU_ASSERT(blob->active.clusters[0] == 1 * 256);
+ CU_ASSERT(blob->active.clusters[1] == 2 * 256);
+ blob->active.clusters[1] = 3 * 256;
+
+ memset(payload_write, 0xE5, sizeof(payload_write));
+ iov_write[0].iov_base = payload_write;
+ iov_write[0].iov_len = 1 * 4096;
+ iov_write[1].iov_base = payload_write + 1 * 4096;
+ iov_write[1].iov_len = 5 * 4096;
+ iov_write[2].iov_base = payload_write + 6 * 4096;
+ iov_write[2].iov_len = 4 * 4096;
+ /*
+ * Choose a page offset just before the cluster boundary. The first 6 pages of payload
+ * will get written to the first cluster, the last 4 to the second cluster.
+ */
+ spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ memset(payload_read, 0xAA, sizeof(payload_read));
+ iov_read[0].iov_base = payload_read;
+ iov_read[0].iov_len = 3 * 4096;
+ iov_read[1].iov_base = payload_read + 3 * 4096;
+ iov_read[1].iov_len = 4 * 4096;
+ iov_read[2].iov_base = payload_read + 7 * 4096;
+ iov_read[2].iov_len = 3 * 4096;
+ spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
+
+ buf = calloc(1, 256 * 4096);
+ SPDK_CU_ASSERT_FATAL(buf != NULL);
+ /* Check that cluster 2 on "disk" was not modified. */
+ CU_ASSERT(memcmp(buf, &g_dev_buffer[512 * 4096], 256 * 4096) == 0);
+ free(buf);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_free_io_channel(channel);
+
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+}
+
+static uint32_t
+bs_channel_get_req_count(struct spdk_io_channel *_channel)
+{
+ struct spdk_bs_channel *channel = spdk_io_channel_get_ctx(_channel);
+ struct spdk_bs_request_set *set;
+ uint32_t count = 0;
+
+ TAILQ_FOREACH(set, &channel->reqs, link) {
+ count++;
+ }
+
+ return count;
+}
+
+static void
+blob_rw_verify_iov_nomem(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_blob *blob;
+ struct spdk_io_channel *channel;
+ spdk_blob_id blobid;
+ uint8_t payload_write[10 * 4096];
+ struct iovec iov_write[3];
+ uint32_t req_count;
+
+ dev = init_dev();
+ memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
+
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ CU_ASSERT(channel != NULL);
+
+ spdk_bs_create_blob(bs, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob = g_blob;
+
+ spdk_blob_resize(blob, 2, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /*
+ * Choose a page offset just before the cluster boundary. The first 6 pages of payload
+ * will get written to the first cluster, the last 4 to the second cluster.
+ */
+ iov_write[0].iov_base = payload_write;
+ iov_write[0].iov_len = 1 * 4096;
+ iov_write[1].iov_base = payload_write + 1 * 4096;
+ iov_write[1].iov_len = 5 * 4096;
+ iov_write[2].iov_base = payload_write + 6 * 4096;
+ iov_write[2].iov_len = 4 * 4096;
+ MOCK_SET(calloc, NULL);
+ req_count = bs_channel_get_req_count(channel);
+ spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno = -ENOMEM);
+ CU_ASSERT(req_count == bs_channel_get_req_count(channel));
+ MOCK_CLEAR(calloc);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_free_io_channel(channel);
+
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+}
+
+static void
+blob_rw_iov_read_only(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_blob *blob;
+ struct spdk_io_channel *channel;
+ spdk_blob_id blobid;
+ uint8_t payload_read[4096];
+ uint8_t payload_write[4096];
+ struct iovec iov_read;
+ struct iovec iov_write;
+
+ dev = init_dev();
+ memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
+
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ CU_ASSERT(channel != NULL);
+
+ spdk_bs_create_blob(bs, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ spdk_blob_resize(blob, 2, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Verify that writev failed if read_only flag is set. */
+ blob->data_ro = true;
+ iov_write.iov_base = payload_write;
+ iov_write.iov_len = sizeof(payload_write);
+ spdk_blob_io_writev(blob, channel, &iov_write, 1, 0, 1, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == -EPERM);
+
+ /* Verify that reads pass if data_ro flag is set. */
+ iov_read.iov_base = payload_read;
+ iov_read.iov_len = sizeof(payload_read);
+ spdk_blob_io_readv(blob, channel, &iov_read, 1, 0, 1, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_free_io_channel(channel);
+
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+}
+
+static void
+_blob_io_read_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
+ uint8_t *payload, uint64_t offset, uint64_t length,
+ spdk_blob_op_complete cb_fn, void *cb_arg)
+{
+ uint64_t i;
+ uint8_t *buf;
+ uint64_t page_size = spdk_bs_get_page_size(blob->bs);
+
+ /* To be sure that operation is NOT splitted, read one page at the time */
+ buf = payload;
+ for (i = 0; i < length; i++) {
+ spdk_blob_io_read(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
+ if (g_bserrno != 0) {
+ /* Pass the error code up */
+ break;
+ }
+ buf += page_size;
+ }
+
+ cb_fn(cb_arg, g_bserrno);
+}
+
+static void
+_blob_io_write_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
+ uint8_t *payload, uint64_t offset, uint64_t length,
+ spdk_blob_op_complete cb_fn, void *cb_arg)
+{
+ uint64_t i;
+ uint8_t *buf;
+ uint64_t page_size = spdk_bs_get_page_size(blob->bs);
+
+ /* To be sure that operation is NOT splitted, write one page at the time */
+ buf = payload;
+ for (i = 0; i < length; i++) {
+ spdk_blob_io_write(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
+ if (g_bserrno != 0) {
+ /* Pass the error code up */
+ break;
+ }
+ buf += page_size;
+ }
+
+ cb_fn(cb_arg, g_bserrno);
+}
+
+static void
+blob_operation_split_rw(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_blob *blob;
+ struct spdk_io_channel *channel;
+ struct spdk_blob_opts opts;
+ spdk_blob_id blobid;
+ uint64_t cluster_size;
+
+ uint64_t payload_size;
+ uint8_t *payload_read;
+ uint8_t *payload_write;
+ uint8_t *payload_pattern;
+
+ uint64_t page_size;
+ uint64_t pages_per_cluster;
+ uint64_t pages_per_payload;
+
+ uint64_t i;
+
+ dev = init_dev();
+
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ cluster_size = spdk_bs_get_cluster_size(bs);
+ page_size = spdk_bs_get_page_size(bs);
+ pages_per_cluster = cluster_size / page_size;
+ pages_per_payload = pages_per_cluster * 5;
+ payload_size = cluster_size * 5;
+
+ payload_read = malloc(payload_size);
+ SPDK_CU_ASSERT_FATAL(payload_read != NULL);
+
+ payload_write = malloc(payload_size);
+ SPDK_CU_ASSERT_FATAL(payload_write != NULL);
+
+ payload_pattern = malloc(payload_size);
+ SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);
+
+ /* Prepare random pattern to write */
+ memset(payload_pattern, 0xFF, payload_size);
+ for (i = 0; i < pages_per_payload; i++) {
+ *((uint64_t *)(payload_pattern + page_size * i)) = (i + 1);
+ }
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ SPDK_CU_ASSERT_FATAL(channel != NULL);
+
+ /* Create blob */
+ spdk_blob_opts_init(&opts);
+ opts.thin_provision = false;
+ opts.num_clusters = 5;
+
+ spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
+
+ /* Initial read should return zeroed payload */
+ memset(payload_read, 0xFF, payload_size);
+ spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
+
+ /* Fill whole blob except last page */
+ spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload - 1,
+ blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Write last page with a pattern */
+ spdk_blob_io_write(blob, channel, payload_pattern, pages_per_payload - 1, 1,
+ blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Read whole blob and check consistency */
+ memset(payload_read, 0xFF, payload_size);
+ spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0);
+ CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0);
+
+ /* Fill whole blob except first page */
+ spdk_blob_io_write(blob, channel, payload_pattern, 1, pages_per_payload - 1,
+ blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Write first page with a pattern */
+ spdk_blob_io_write(blob, channel, payload_pattern, 0, 1,
+ blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Read whole blob and check consistency */
+ memset(payload_read, 0xFF, payload_size);
+ spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0);
+ CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0);
+
+
+ /* Fill whole blob with a pattern (5 clusters) */
+
+ /* 1. Read test. */
+ _blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload,
+ blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ memset(payload_read, 0xFF, payload_size);
+ spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
+
+ /* 2. Write test. */
+ spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload,
+ blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ memset(payload_read, 0xFF, payload_size);
+ _blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_free_io_channel(channel);
+
+ /* Unload the blob store */
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+ g_blob = NULL;
+ g_blobid = 0;
+
+ free(payload_read);
+ free(payload_write);
+ free(payload_pattern);
+}
+
+static void
+blob_operation_split_rw_iov(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_blob *blob;
+ struct spdk_io_channel *channel;
+ struct spdk_blob_opts opts;
+ spdk_blob_id blobid;
+ uint64_t cluster_size;
+
+ uint64_t payload_size;
+ uint8_t *payload_read;
+ uint8_t *payload_write;
+ uint8_t *payload_pattern;
+
+ uint64_t page_size;
+ uint64_t pages_per_cluster;
+ uint64_t pages_per_payload;
+
+ struct iovec iov_read[2];
+ struct iovec iov_write[2];
+
+ uint64_t i, j;
+
+ dev = init_dev();
+
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ cluster_size = spdk_bs_get_cluster_size(bs);
+ page_size = spdk_bs_get_page_size(bs);
+ pages_per_cluster = cluster_size / page_size;
+ pages_per_payload = pages_per_cluster * 5;
+ payload_size = cluster_size * 5;
+
+ payload_read = malloc(payload_size);
+ SPDK_CU_ASSERT_FATAL(payload_read != NULL);
+
+ payload_write = malloc(payload_size);
+ SPDK_CU_ASSERT_FATAL(payload_write != NULL);
+
+ payload_pattern = malloc(payload_size);
+ SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);
+
+ /* Prepare random pattern to write */
+ for (i = 0; i < pages_per_payload; i++) {
+ for (j = 0; j < page_size / sizeof(uint64_t); j++) {
+ uint64_t *tmp;
+
+ tmp = (uint64_t *)payload_pattern;
+ tmp += ((page_size * i) / sizeof(uint64_t)) + j;
+ *tmp = i + 1;
+ }
+ }
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ SPDK_CU_ASSERT_FATAL(channel != NULL);
+
+ /* Create blob */
+ spdk_blob_opts_init(&opts);
+ opts.thin_provision = false;
+ opts.num_clusters = 5;
+
+ spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
+
+ /* Initial read should return zeroes payload */
+ memset(payload_read, 0xFF, payload_size);
+ iov_read[0].iov_base = payload_read;
+ iov_read[0].iov_len = cluster_size * 3;
+ iov_read[1].iov_base = payload_read + cluster_size * 3;
+ iov_read[1].iov_len = cluster_size * 2;
+ spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
+
+ /* First of iovs fills whole blob except last page and second of iovs writes last page
+ * with a pattern. */
+ iov_write[0].iov_base = payload_pattern;
+ iov_write[0].iov_len = payload_size - page_size;
+ iov_write[1].iov_base = payload_pattern;
+ iov_write[1].iov_len = page_size;
+ spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Read whole blob and check consistency */
+ memset(payload_read, 0xFF, payload_size);
+ iov_read[0].iov_base = payload_read;
+ iov_read[0].iov_len = cluster_size * 2;
+ iov_read[1].iov_base = payload_read + cluster_size * 2;
+ iov_read[1].iov_len = cluster_size * 3;
+ spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0);
+ CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0);
+
+ /* First of iovs fills only first page and second of iovs writes whole blob except
+ * first page with a pattern. */
+ iov_write[0].iov_base = payload_pattern;
+ iov_write[0].iov_len = page_size;
+ iov_write[1].iov_base = payload_pattern;
+ iov_write[1].iov_len = payload_size - page_size;
+ spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Read whole blob and check consistency */
+ memset(payload_read, 0xFF, payload_size);
+ iov_read[0].iov_base = payload_read;
+ iov_read[0].iov_len = cluster_size * 4;
+ iov_read[1].iov_base = payload_read + cluster_size * 4;
+ iov_read[1].iov_len = cluster_size;
+ spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0);
+ CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0);
+
+
+ /* Fill whole blob with a pattern (5 clusters) */
+
+ /* 1. Read test. */
+ _blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload,
+ blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ memset(payload_read, 0xFF, payload_size);
+ iov_read[0].iov_base = payload_read;
+ iov_read[0].iov_len = cluster_size;
+ iov_read[1].iov_base = payload_read + cluster_size;
+ iov_read[1].iov_len = cluster_size * 4;
+ spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
+
+ /* 2. Write test. */
+ iov_write[0].iov_base = payload_read;
+ iov_write[0].iov_len = cluster_size * 2;
+ iov_write[1].iov_base = payload_read + cluster_size * 2;
+ iov_write[1].iov_len = cluster_size * 3;
+ spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ memset(payload_read, 0xFF, payload_size);
+ _blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_free_io_channel(channel);
+
+ /* Unload the blob store */
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+ g_blob = NULL;
+ g_blobid = 0;
+
+ free(payload_read);
+ free(payload_write);
+ free(payload_pattern);
+}
+
+static void
+blob_unmap(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_blob *blob;
+ struct spdk_io_channel *channel;
+ spdk_blob_id blobid;
+ struct spdk_blob_opts opts;
+ uint8_t payload[4096];
+ int i;
+
+ dev = init_dev();
+
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ CU_ASSERT(channel != NULL);
+
+ spdk_blob_opts_init(&opts);
+ opts.num_clusters = 10;
+
+ spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ spdk_blob_resize(blob, 10, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ memset(payload, 0, sizeof(payload));
+ payload[0] = 0xFF;
+
+ /*
+ * Set first byte of every cluster to 0xFF.
+ * First cluster on device is reserved so let's start from cluster number 1
+ */
+ for (i = 1; i < 11; i++) {
+ g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] = 0xFF;
+ }
+
+ /* Confirm writes */
+ for (i = 0; i < 10; i++) {
+ payload[0] = 0;
+ spdk_blob_io_read(blob, channel, &payload, i * SPDK_BLOB_OPTS_CLUSTER_SZ / 4096, 1,
+ blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(payload[0] == 0xFF);
+ }
+
+ /* Mark some clusters as unallocated */
+ blob->active.clusters[1] = 0;
+ blob->active.clusters[2] = 0;
+ blob->active.clusters[3] = 0;
+ blob->active.clusters[6] = 0;
+ blob->active.clusters[8] = 0;
+
+ /* Unmap clusters by resizing to 0 */
+ spdk_blob_resize(blob, 0, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_blob_sync_md(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Confirm that only 'allocated' clusters were unmapped */
+ for (i = 1; i < 11; i++) {
+ switch (i) {
+ case 2:
+ case 3:
+ case 4:
+ case 7:
+ case 9:
+ CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0xFF);
+ break;
+ default:
+ CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0);
+ break;
+ }
+ }
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_free_io_channel(channel);
+
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+}
+
+
+static void
+blob_iter(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_blob *blob;
+ spdk_blob_id blobid;
+
+ dev = init_dev();
+
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_blob == NULL);
+ CU_ASSERT(g_bserrno == -ENOENT);
+
+ spdk_bs_create_blob(bs, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_blob != NULL);
+ CU_ASSERT(g_bserrno == 0);
+ blob = g_blob;
+ CU_ASSERT(spdk_blob_get_id(blob) == blobid);
+
+ spdk_bs_iter_next(bs, blob, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_blob == NULL);
+ CU_ASSERT(g_bserrno == -ENOENT);
+
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+}
+
+static void
+blob_xattr(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_blob *blob;
+ spdk_blob_id blobid;
+ uint64_t length;
+ int rc;
+ const char *name1, *name2;
+ const void *value;
+ size_t value_len;
+ struct spdk_xattr_names *names;
+
+ dev = init_dev();
+
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ spdk_bs_create_blob(bs, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ /* Test that set_xattr fails if md_ro flag is set. */
+ blob->md_ro = true;
+ rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
+ CU_ASSERT(rc == -EPERM);
+
+ blob->md_ro = false;
+ rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
+ CU_ASSERT(rc == 0);
+
+ length = 2345;
+ rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
+ CU_ASSERT(rc == 0);
+
+ /* Overwrite "length" xattr. */
+ length = 3456;
+ rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
+ CU_ASSERT(rc == 0);
+
+ /* get_xattr should still work even if md_ro flag is set. */
+ value = NULL;
+ blob->md_ro = true;
+ rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(*(uint64_t *)value == length);
+ CU_ASSERT(value_len == 8);
+ blob->md_ro = false;
+
+ rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
+ CU_ASSERT(rc == -ENOENT);
+
+ names = NULL;
+ rc = spdk_blob_get_xattr_names(blob, &names);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(names != NULL);
+ CU_ASSERT(spdk_xattr_names_get_count(names) == 2);
+ name1 = spdk_xattr_names_get_name(names, 0);
+ SPDK_CU_ASSERT_FATAL(name1 != NULL);
+ CU_ASSERT(!strcmp(name1, "name") || !strcmp(name1, "length"));
+ name2 = spdk_xattr_names_get_name(names, 1);
+ SPDK_CU_ASSERT_FATAL(name2 != NULL);
+ CU_ASSERT(!strcmp(name2, "name") || !strcmp(name2, "length"));
+ CU_ASSERT(strcmp(name1, name2));
+ spdk_xattr_names_free(names);
+
+ /* Confirm that remove_xattr fails if md_ro is set to true. */
+ blob->md_ro = true;
+ rc = spdk_blob_remove_xattr(blob, "name");
+ CU_ASSERT(rc == -EPERM);
+
+ blob->md_ro = false;
+ rc = spdk_blob_remove_xattr(blob, "name");
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_blob_remove_xattr(blob, "foobar");
+ CU_ASSERT(rc == -ENOENT);
+
+ /* Set internal xattr */
+ length = 7898;
+ rc = _spdk_blob_set_xattr(blob, "internal", &length, sizeof(length), true);
+ CU_ASSERT(rc == 0);
+ rc = _spdk_blob_get_xattr_value(blob, "internal", &value, &value_len, true);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(*(uint64_t *)value == length);
+ /* try to get public xattr with same name */
+ rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len);
+ CU_ASSERT(rc != 0);
+ rc = _spdk_blob_get_xattr_value(blob, "internal", &value, &value_len, false);
+ CU_ASSERT(rc != 0);
+ /* Check if SPDK_BLOB_INTERNAL_XATTR is set */
+ CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) ==
+ SPDK_BLOB_INTERNAL_XATTR)
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+
+ /* Check if xattrs are persisted */
+ dev = init_dev();
+
+ spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+
+ bs = g_bs;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ rc = _spdk_blob_get_xattr_value(blob, "internal", &value, &value_len, true);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(*(uint64_t *)value == length);
+
+ /* try to get internal xattr trough public call */
+ rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len);
+ CU_ASSERT(rc != 0);
+
+ rc = _spdk_blob_remove_xattr(blob, "internal", true);
+ CU_ASSERT(rc == 0);
+
+ CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) == 0);
+
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+}
+
+static void
+bs_load(void)
+{
+ struct spdk_bs_dev *dev;
+ spdk_blob_id blobid;
+ struct spdk_blob *blob;
+ struct spdk_bs_super_block *super_block;
+ uint64_t length;
+ int rc;
+ const void *value;
+ size_t value_len;
+ struct spdk_bs_opts opts;
+
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
+
+ /* Initialize a new blob store */
+ spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+
+ /* Try to open a blobid that does not exist */
+ spdk_bs_open_blob(g_bs, 0, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == -ENOENT);
+ CU_ASSERT(g_blob == NULL);
+
+ /* Create a blob */
+ spdk_bs_create_blob(g_bs, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(g_bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob = g_blob;
+
+ /* Try again to open valid blob but without the upper bit set */
+ spdk_bs_open_blob(g_bs, blobid & 0xFFFFFFFF, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == -ENOENT);
+ CU_ASSERT(g_blob == NULL);
+
+ /* Set some xattrs */
+ rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
+ CU_ASSERT(rc == 0);
+
+ length = 2345;
+ rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
+ CU_ASSERT(rc == 0);
+
+ /* Resize the blob */
+ spdk_blob_resize(blob, 10, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ blob = NULL;
+ g_blob = NULL;
+ g_blobid = SPDK_BLOBID_INVALID;
+
+ /* Unload the blob store */
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+ g_blob = NULL;
+ g_blobid = 0;
+
+ super_block = (struct spdk_bs_super_block *)g_dev_buffer;
+ CU_ASSERT(super_block->clean == 1);
+
+ /* Load should fail for device with an unsupported blocklen */
+ dev = init_dev();
+ dev->blocklen = SPDK_BS_PAGE_SIZE * 2;
+ spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == -EINVAL);
+
+ /* Load should when max_md_ops is set to zero */
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ opts.max_md_ops = 0;
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == -EINVAL);
+
+ /* Load should when max_channel_ops is set to zero */
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ opts.max_channel_ops = 0;
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == -EINVAL);
+
+ /* Load an existing blob store */
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+
+ super_block = (struct spdk_bs_super_block *)g_dev_buffer;
+ CU_ASSERT(super_block->clean == 1);
+ CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
+
+ spdk_bs_open_blob(g_bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob = g_blob;
+
+ /* Verify that blobstore is marked dirty after first metadata sync */
+ spdk_blob_sync_md(blob, blob_op_complete, NULL);
+ CU_ASSERT(super_block->clean == 1);
+
+ /* Get the xattrs */
+ value = NULL;
+ rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(*(uint64_t *)value == length);
+ CU_ASSERT(value_len == 8);
+
+ rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
+ CU_ASSERT(rc == -ENOENT);
+
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ blob = NULL;
+ g_blob = NULL;
+
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+
+ /* Load should fail: bdev size < saved size */
+ dev = init_dev();
+ dev->blockcnt /= 2;
+
+ spdk_bs_opts_init(&opts);
+ snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+
+ CU_ASSERT(g_bserrno == -EILSEQ);
+
+ /* Load should succeed: bdev size > saved size */
+ dev = init_dev();
+ dev->blockcnt *= 4;
+
+ spdk_bs_opts_init(&opts);
+ snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+
+ CU_ASSERT(g_bserrno == 0);
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+
+
+ /* Test compatibility mode */
+
+ dev = init_dev();
+ super_block->size = 0;
+ super_block->crc = _spdk_blob_md_page_calc_crc(super_block);
+
+ spdk_bs_opts_init(&opts);
+ snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+
+ /* Create a blob */
+ spdk_bs_create_blob(g_bs, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+
+ /* Blobstore should update number of blocks in super_block */
+ CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
+ CU_ASSERT(super_block->clean == 0);
+
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(super_block->clean == 1);
+ g_bs = NULL;
+
+}
+
+static void
+bs_load_custom_cluster_size(void)
+{
+ struct spdk_bs_dev *dev;
+ struct spdk_bs_super_block *super_block;
+ struct spdk_bs_opts opts;
+ uint32_t custom_cluster_size = 4194304; /* 4MiB */
+ uint32_t cluster_sz;
+ uint64_t total_clusters;
+
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ opts.cluster_sz = custom_cluster_size;
+ snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
+
+ /* Initialize a new blob store */
+ spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ cluster_sz = g_bs->cluster_sz;
+ total_clusters = g_bs->total_clusters;
+
+ /* Unload the blob store */
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+ g_blob = NULL;
+ g_blobid = 0;
+
+ super_block = (struct spdk_bs_super_block *)g_dev_buffer;
+ CU_ASSERT(super_block->clean == 1);
+
+ /* Load an existing blob store */
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ /* Compare cluster size and number to one after initialization */
+ CU_ASSERT(cluster_sz == g_bs->cluster_sz);
+ CU_ASSERT(total_clusters == g_bs->total_clusters);
+
+ super_block = (struct spdk_bs_super_block *)g_dev_buffer;
+ CU_ASSERT(super_block->clean == 1);
+ CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
+
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(super_block->clean == 1);
+ g_bs = NULL;
+}
+
+static void
+bs_type(void)
+{
+ struct spdk_bs_dev *dev;
+ struct spdk_bs_opts opts;
+
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
+
+ /* Initialize a new blob store */
+ spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+
+ /* Unload the blob store */
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+ g_blob = NULL;
+ g_blobid = 0;
+
+ /* Load non existing blobstore type */
+ dev = init_dev();
+ snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING");
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno != 0);
+
+ /* Load with empty blobstore type */
+ dev = init_dev();
+ memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+
+ /* Initialize a new blob store with empty bstype */
+ dev = init_dev();
+ memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+
+ /* Load non existing blobstore type */
+ dev = init_dev();
+ snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING");
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno != 0);
+
+ /* Load with empty blobstore type */
+ dev = init_dev();
+ memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+}
+
+static void
+bs_super_block(void)
+{
+ struct spdk_bs_dev *dev;
+ struct spdk_bs_super_block *super_block;
+ struct spdk_bs_opts opts;
+ struct spdk_bs_super_block_ver1 super_block_v1;
+
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
+
+ /* Initialize a new blob store */
+ spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+
+ /* Unload the blob store */
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+ g_blob = NULL;
+ g_blobid = 0;
+
+ /* Load an existing blob store with version newer than supported */
+ super_block = (struct spdk_bs_super_block *)g_dev_buffer;
+ super_block->version++;
+
+ dev = init_dev();
+ memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno != 0);
+
+ /* Create a new blob store with super block version 1 */
+ dev = init_dev();
+ super_block_v1.version = 1;
+ memcpy(super_block_v1.signature, "SPDKBLOB", sizeof(super_block_v1.signature));
+ super_block_v1.length = 0x1000;
+ super_block_v1.clean = 1;
+ super_block_v1.super_blob = 0xFFFFFFFFFFFFFFFF;
+ super_block_v1.cluster_size = 0x100000;
+ super_block_v1.used_page_mask_start = 0x01;
+ super_block_v1.used_page_mask_len = 0x01;
+ super_block_v1.used_cluster_mask_start = 0x02;
+ super_block_v1.used_cluster_mask_len = 0x01;
+ super_block_v1.md_start = 0x03;
+ super_block_v1.md_len = 0x40;
+ memset(super_block_v1.reserved, 0, 4036);
+ super_block_v1.crc = _spdk_blob_md_page_calc_crc(&super_block_v1);
+ memcpy(g_dev_buffer, &super_block_v1, sizeof(struct spdk_bs_super_block_ver1));
+
+ memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+}
+
+/*
+ * Create a blobstore and then unload it.
+ */
+static void
+bs_unload(void)
+{
+ struct spdk_bs_dev *dev;
+ struct spdk_blob_store *bs;
+ spdk_blob_id blobid;
+ struct spdk_blob *blob;
+
+ dev = init_dev();
+
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ /* Create a blob and open it. */
+ g_bserrno = -1;
+ spdk_bs_create_blob(bs, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid > 0);
+ blobid = g_blobid;
+
+ g_bserrno = -1;
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob = g_blob;
+
+ /* Try to unload blobstore, should fail with open blob */
+ g_bserrno = -1;
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == -EBUSY);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+
+ /* Close the blob, then successfully unload blobstore */
+ g_bserrno = -1;
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ g_bserrno = -1;
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+}
+
+/*
+ * Create a blobstore with a cluster size different than the default, and ensure it is
+ * persisted.
+ */
+static void
+bs_cluster_sz(void)
+{
+ struct spdk_bs_dev *dev;
+ struct spdk_bs_opts opts;
+ uint32_t cluster_sz;
+
+ /* Set cluster size to zero */
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ opts.cluster_sz = 0;
+
+ /* Initialize a new blob store */
+ spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == -EINVAL);
+ SPDK_CU_ASSERT_FATAL(g_bs == NULL);
+
+ /*
+ * Set cluster size to blobstore page size,
+ * to work it is required to be at least twice the blobstore page size.
+ */
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ opts.cluster_sz = SPDK_BS_PAGE_SIZE;
+
+ /* Initialize a new blob store */
+ spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == -ENOMEM);
+ SPDK_CU_ASSERT_FATAL(g_bs == NULL);
+
+ /*
+ * Set cluster size to lower than page size,
+ * to work it is required to be at least twice the blobstore page size.
+ */
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ opts.cluster_sz = SPDK_BS_PAGE_SIZE - 1;
+
+ /* Initialize a new blob store */
+ spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == -EINVAL);
+ SPDK_CU_ASSERT_FATAL(g_bs == NULL);
+
+ /* Set cluster size to twice the default */
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ opts.cluster_sz *= 2;
+ cluster_sz = opts.cluster_sz;
+
+ /* Initialize a new blob store */
+ spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+
+ CU_ASSERT(spdk_bs_get_cluster_size(g_bs) == cluster_sz);
+
+ /* Unload the blob store */
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+ g_blob = NULL;
+ g_blobid = 0;
+
+ dev = init_dev();
+ /* Load an existing blob store */
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+
+ CU_ASSERT(spdk_bs_get_cluster_size(g_bs) == cluster_sz);
+
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+}
+
+/*
+ * Create a blobstore, reload it and ensure total usable cluster count
+ * stays the same.
+ */
+static void
+bs_usable_clusters(void)
+{
+ struct spdk_bs_dev *dev;
+ struct spdk_bs_opts opts;
+ uint32_t clusters;
+ int i;
+
+ /* Init blobstore */
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+
+ spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+
+ clusters = spdk_bs_total_data_cluster_count(g_bs);
+
+ /* Unload the blob store */
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+
+ dev = init_dev();
+ /* Load an existing blob store */
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+
+ CU_ASSERT(spdk_bs_total_data_cluster_count(g_bs) == clusters);
+
+ /* Create and resize blobs to make sure that useable cluster count won't change */
+ for (i = 0; i < 4; i++) {
+ g_bserrno = -1;
+ g_blobid = SPDK_BLOBID_INVALID;
+ spdk_bs_create_blob(g_bs, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+
+ g_bserrno = -1;
+ g_blob = NULL;
+ spdk_bs_open_blob(g_bs, g_blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+
+ spdk_blob_resize(g_blob, 10, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ g_bserrno = -1;
+ spdk_blob_close(g_blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ CU_ASSERT(spdk_bs_total_data_cluster_count(g_bs) == clusters);
+ }
+
+ /* Reload the blob store to make sure that nothing changed */
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+
+ dev = init_dev();
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+
+ CU_ASSERT(spdk_bs_total_data_cluster_count(g_bs) == clusters);
+
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+}
+
+/*
+ * Test resizing of the metadata blob. This requires creating enough blobs
+ * so that one cluster is not enough to fit the metadata for those blobs.
+ * To induce this condition to happen more quickly, we reduce the cluster
+ * size to 16KB, which means only 4 4KB blob metadata pages can fit.
+ */
+static void
+bs_resize_md(void)
+{
+ const int CLUSTER_PAGE_COUNT = 4;
+ const int NUM_BLOBS = CLUSTER_PAGE_COUNT * 4;
+ struct spdk_bs_dev *dev;
+ struct spdk_bs_opts opts;
+ uint32_t cluster_sz;
+ spdk_blob_id blobids[NUM_BLOBS];
+ int i;
+
+
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ opts.cluster_sz = CLUSTER_PAGE_COUNT * 4096;
+ cluster_sz = opts.cluster_sz;
+
+ /* Initialize a new blob store */
+ spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+
+ CU_ASSERT(spdk_bs_get_cluster_size(g_bs) == cluster_sz);
+
+ for (i = 0; i < NUM_BLOBS; i++) {
+ g_bserrno = -1;
+ g_blobid = SPDK_BLOBID_INVALID;
+ spdk_bs_create_blob(g_bs, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobids[i] = g_blobid;
+ }
+
+ /* Unload the blob store */
+ g_bserrno = -1;
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Load an existing blob store */
+ g_bserrno = -1;
+ g_bs = NULL;
+ dev = init_dev();
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+
+ CU_ASSERT(spdk_bs_get_cluster_size(g_bs) == cluster_sz);
+
+ for (i = 0; i < NUM_BLOBS; i++) {
+ g_bserrno = -1;
+ g_blob = NULL;
+ spdk_bs_open_blob(g_bs, blobids[i], blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ g_bserrno = -1;
+ spdk_blob_close(g_blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ }
+
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+}
+
+static void
+bs_destroy(void)
+{
+ struct spdk_bs_dev *dev;
+ struct spdk_bs_opts opts;
+
+ /* Initialize a new blob store */
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+
+ /* Destroy the blob store */
+ g_bserrno = -1;
+ spdk_bs_destroy(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Loading an non-existent blob store should fail. */
+ g_bs = NULL;
+ dev = init_dev();
+
+ g_bserrno = 0;
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno != 0);
+}
+
+/* Try to hit all of the corner cases associated with serializing
+ * a blob to disk
+ */
+static void
+blob_serialize(void)
+{
+ struct spdk_bs_dev *dev;
+ struct spdk_bs_opts opts;
+ struct spdk_blob_store *bs;
+ spdk_blob_id blobid[2];
+ struct spdk_blob *blob[2];
+ uint64_t i;
+ char *value;
+ int rc;
+
+ dev = init_dev();
+
+ /* Initialize a new blobstore with very small clusters */
+ spdk_bs_opts_init(&opts);
+ opts.cluster_sz = dev->blocklen * 8;
+ spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ /* Create and open two blobs */
+ for (i = 0; i < 2; i++) {
+ spdk_bs_create_blob(bs, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid[i] = g_blobid;
+
+ /* Open a blob */
+ spdk_bs_open_blob(bs, blobid[i], blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob[i] = g_blob;
+
+ /* Set a fairly large xattr on both blobs to eat up
+ * metadata space
+ */
+ value = calloc(dev->blocklen - 64, sizeof(char));
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ memset(value, i, dev->blocklen / 2);
+ rc = spdk_blob_set_xattr(blob[i], "name", value, dev->blocklen - 64);
+ CU_ASSERT(rc == 0);
+ free(value);
+ }
+
+ /* Resize the blobs, alternating 1 cluster at a time.
+ * This thwarts run length encoding and will cause spill
+ * over of the extents.
+ */
+ for (i = 0; i < 6; i++) {
+ spdk_blob_resize(blob[i % 2], (i / 2) + 1, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ }
+
+ for (i = 0; i < 2; i++) {
+ spdk_blob_sync_md(blob[i], blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ }
+
+ /* Close the blobs */
+ for (i = 0; i < 2; i++) {
+ spdk_blob_close(blob[i], blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ }
+
+ /* Unload the blobstore */
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+ g_blob = NULL;
+ g_blobid = 0;
+ bs = NULL;
+
+ dev = init_dev();
+ /* Load an existing blob store */
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ for (i = 0; i < 2; i++) {
+ blob[i] = NULL;
+
+ spdk_bs_open_blob(bs, blobid[i], blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob[i] = g_blob;
+
+ CU_ASSERT(spdk_blob_get_num_clusters(blob[i]) == 3);
+
+ spdk_blob_close(blob[i], blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ }
+
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+}
+
+static void
+blob_crc(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_blob *blob;
+ spdk_blob_id blobid;
+ uint32_t page_num;
+ int index;
+ struct spdk_blob_md_page *page;
+
+ dev = init_dev();
+
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ spdk_bs_create_blob(bs, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob = g_blob;
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ page_num = _spdk_bs_blobid_to_page(blobid);
+ index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num);
+ page = (struct spdk_blob_md_page *)&g_dev_buffer[index];
+ page->crc = 0;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == -EINVAL);
+ CU_ASSERT(g_blob == NULL);
+ g_bserrno = 0;
+
+ spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == -EINVAL);
+
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+}
+
+static void
+super_block_crc(void)
+{
+ struct spdk_bs_dev *dev;
+ struct spdk_bs_super_block *super_block;
+ struct spdk_bs_opts opts;
+
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+
+ super_block = (struct spdk_bs_super_block *)g_dev_buffer;
+ super_block->crc = 0;
+ dev = init_dev();
+
+ /* Load an existing blob store */
+ g_bserrno = 0;
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == -EILSEQ);
+}
+
+/* For blob dirty shutdown test case we do the following sub-test cases:
+ * 1 Initialize new blob store and create 1 super blob with some xattrs, then we
+ * dirty shutdown and reload the blob store and verify the xattrs.
+ * 2 Resize the blob from 10 clusters to 20 clusters and then dirty shutdown,
+ * reload the blob store and verify the clusters number.
+ * 3 Create the second blob and then dirty shutdown, reload the blob store
+ * and verify the second blob.
+ * 4 Delete the second blob and then dirty shutdown, reload the blob store
+ * and verify the second blob is invalid.
+ * 5 Create the second blob again and also create the third blob, modify the
+ * md of second blob which makes the md invalid, and then dirty shutdown,
+ * reload the blob store verify the second blob, it should invalid and also
+ * verify the third blob, it should correct.
+ */
+static void
+blob_dirty_shutdown(void)
+{
+ int rc;
+ int index;
+ struct spdk_bs_dev *dev;
+ spdk_blob_id blobid1, blobid2, blobid3;
+ struct spdk_blob *blob;
+ uint64_t length;
+ uint64_t free_clusters;
+ const void *value;
+ size_t value_len;
+ uint32_t page_num;
+ struct spdk_blob_md_page *page;
+ struct spdk_bs_opts opts;
+
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ /* Initialize a new blob store */
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+
+ /* Create first blob */
+ spdk_bs_create_blob(g_bs, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid1 = g_blobid;
+
+ spdk_bs_open_blob(g_bs, blobid1, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob = g_blob;
+
+ /* Set some xattrs */
+ rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
+ CU_ASSERT(rc == 0);
+
+ length = 2345;
+ rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
+ CU_ASSERT(rc == 0);
+
+ /* Resize the blob */
+ spdk_blob_resize(blob, 10, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Set the blob as the super blob */
+ spdk_bs_set_super(g_bs, blobid1, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ free_clusters = spdk_bs_free_cluster_count(g_bs);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ blob = NULL;
+ g_blob = NULL;
+ g_blobid = SPDK_BLOBID_INVALID;
+
+ /* Dirty shutdown */
+ _spdk_bs_free(g_bs);
+
+ /* reload blobstore */
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Get the super blob */
+ spdk_bs_get_super(g_bs, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(blobid1 == g_blobid);
+
+ spdk_bs_open_blob(g_bs, blobid1, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob = g_blob;
+
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(g_bs));
+
+ /* Get the xattrs */
+ value = NULL;
+ rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(*(uint64_t *)value == length);
+ CU_ASSERT(value_len == 8);
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
+
+ /* Resize the blob */
+ spdk_blob_resize(blob, 20, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ free_clusters = spdk_bs_free_cluster_count(g_bs);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ blob = NULL;
+ g_blob = NULL;
+ g_blobid = SPDK_BLOBID_INVALID;
+
+ /* Dirty shutdown */
+ _spdk_bs_free(g_bs);
+
+ /* reload the blobstore */
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ /* Load an existing blob store */
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ spdk_bs_open_blob(g_bs, blobid1, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob = g_blob;
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 20);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(g_bs));
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ blob = NULL;
+ g_blob = NULL;
+ g_blobid = SPDK_BLOBID_INVALID;
+
+ /* Create second blob */
+ spdk_bs_create_blob(g_bs, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid2 = g_blobid;
+
+ spdk_bs_open_blob(g_bs, blobid2, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob = g_blob;
+
+ /* Set some xattrs */
+ rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1);
+ CU_ASSERT(rc == 0);
+
+ length = 5432;
+ rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
+ CU_ASSERT(rc == 0);
+
+ /* Resize the blob */
+ spdk_blob_resize(blob, 10, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ free_clusters = spdk_bs_free_cluster_count(g_bs);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ blob = NULL;
+ g_blob = NULL;
+ g_blobid = SPDK_BLOBID_INVALID;
+
+ /* Dirty shutdown */
+ _spdk_bs_free(g_bs);
+
+ /* reload the blobstore */
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_open_blob(g_bs, blobid2, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob = g_blob;
+
+ /* Get the xattrs */
+ value = NULL;
+ rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(*(uint64_t *)value == length);
+ CU_ASSERT(value_len == 8);
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(g_bs));
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ spdk_bs_delete_blob(g_bs, blobid2, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ free_clusters = spdk_bs_free_cluster_count(g_bs);
+
+ /* Dirty shutdown */
+ _spdk_bs_free(g_bs);
+ /* reload the blobstore */
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_open_blob(g_bs, blobid2, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno != 0);
+ CU_ASSERT(g_blob == NULL);
+
+ spdk_bs_open_blob(g_bs, blobid1, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(g_bs));
+ spdk_blob_close(g_blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+
+ /* reload the blobstore */
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Create second blob */
+ spdk_bs_create_blob(g_bs, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid2 = g_blobid;
+
+ /* Create third blob */
+ spdk_bs_create_blob(g_bs, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid3 = g_blobid;
+
+ spdk_bs_open_blob(g_bs, blobid2, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob = g_blob;
+
+ /* Set some xattrs for second blob */
+ rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1);
+ CU_ASSERT(rc == 0);
+
+ length = 5432;
+ rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
+ CU_ASSERT(rc == 0);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ blob = NULL;
+ g_blob = NULL;
+ g_blobid = SPDK_BLOBID_INVALID;
+
+ spdk_bs_open_blob(g_bs, blobid3, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob = g_blob;
+
+ /* Set some xattrs for third blob */
+ rc = spdk_blob_set_xattr(blob, "name", "log2.txt", strlen("log2.txt") + 1);
+ CU_ASSERT(rc == 0);
+
+ length = 5432;
+ rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
+ CU_ASSERT(rc == 0);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ blob = NULL;
+ g_blob = NULL;
+ g_blobid = SPDK_BLOBID_INVALID;
+
+ /* Mark second blob as invalid */
+ page_num = _spdk_bs_blobid_to_page(blobid2);
+
+ index = DEV_BUFFER_BLOCKLEN * (g_bs->md_start + page_num);
+ page = (struct spdk_blob_md_page *)&g_dev_buffer[index];
+ page->sequence_num = 1;
+ page->crc = _spdk_blob_md_page_calc_crc(page);
+
+ free_clusters = spdk_bs_free_cluster_count(g_bs);
+
+ /* Dirty shutdown */
+ _spdk_bs_free(g_bs);
+ /* reload the blobstore */
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_open_blob(g_bs, blobid2, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno != 0);
+ CU_ASSERT(g_blob == NULL);
+
+ spdk_bs_open_blob(g_bs, blobid3, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob = g_blob;
+
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(g_bs));
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ blob = NULL;
+ g_blob = NULL;
+ g_blobid = SPDK_BLOBID_INVALID;
+
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+}
+
+static void
+blob_flags(void)
+{
+ struct spdk_bs_dev *dev;
+ spdk_blob_id blobid_invalid, blobid_data_ro, blobid_md_ro;
+ struct spdk_blob *blob_invalid, *blob_data_ro, *blob_md_ro;
+ struct spdk_bs_opts opts;
+ int rc;
+
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+
+ /* Initialize a new blob store */
+ spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+
+ /* Create three blobs - one each for testing invalid, data_ro and md_ro flags. */
+ spdk_bs_create_blob(g_bs, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid_invalid = g_blobid;
+
+ spdk_bs_create_blob(g_bs, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid_data_ro = g_blobid;
+
+ spdk_bs_create_blob(g_bs, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid_md_ro = g_blobid;
+
+ spdk_bs_open_blob(g_bs, blobid_invalid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob_invalid = g_blob;
+
+ spdk_bs_open_blob(g_bs, blobid_data_ro, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob_data_ro = g_blob;
+
+ spdk_bs_open_blob(g_bs, blobid_md_ro, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob_md_ro = g_blob;
+
+ /* Change the size of blob_data_ro to check if flags are serialized
+ * when blob has non zero number of extents */
+ spdk_blob_resize(blob_data_ro, 10, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Set the xattr to check if flags are serialized
+ * when blob has non zero number of xattrs */
+ rc = spdk_blob_set_xattr(blob_md_ro, "name", "log.txt", strlen("log.txt") + 1);
+ CU_ASSERT(rc == 0);
+
+ blob_invalid->invalid_flags = (1ULL << 63);
+ blob_invalid->state = SPDK_BLOB_STATE_DIRTY;
+ blob_data_ro->data_ro_flags = (1ULL << 62);
+ blob_data_ro->state = SPDK_BLOB_STATE_DIRTY;
+ blob_md_ro->md_ro_flags = (1ULL << 61);
+ blob_md_ro->state = SPDK_BLOB_STATE_DIRTY;
+
+ g_bserrno = -1;
+ spdk_blob_sync_md(blob_invalid, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bserrno = -1;
+ spdk_blob_sync_md(blob_data_ro, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bserrno = -1;
+ spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ g_bserrno = -1;
+ spdk_blob_close(blob_invalid, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ blob_invalid = NULL;
+ g_bserrno = -1;
+ spdk_blob_close(blob_data_ro, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ blob_data_ro = NULL;
+ g_bserrno = -1;
+ spdk_blob_close(blob_md_ro, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ blob_md_ro = NULL;
+
+ g_blob = NULL;
+ g_blobid = SPDK_BLOBID_INVALID;
+
+ /* Unload the blob store */
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+
+ /* Load an existing blob store */
+ dev = init_dev();
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+
+ g_blob = NULL;
+ g_bserrno = 0;
+ spdk_bs_open_blob(g_bs, blobid_invalid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno != 0);
+ CU_ASSERT(g_blob == NULL);
+
+ g_blob = NULL;
+ g_bserrno = -1;
+ spdk_bs_open_blob(g_bs, blobid_data_ro, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob_data_ro = g_blob;
+ /* If an unknown data_ro flag was found, the blob should be marked both data and md read-only. */
+ CU_ASSERT(blob_data_ro->data_ro == true);
+ CU_ASSERT(blob_data_ro->md_ro == true);
+ CU_ASSERT(spdk_blob_get_num_clusters(blob_data_ro) == 10);
+
+ g_blob = NULL;
+ g_bserrno = -1;
+ spdk_bs_open_blob(g_bs, blobid_md_ro, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob_md_ro = g_blob;
+ CU_ASSERT(blob_md_ro->data_ro == false);
+ CU_ASSERT(blob_md_ro->md_ro == true);
+
+ g_bserrno = -1;
+ spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_blob_close(blob_data_ro, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ spdk_blob_close(blob_md_ro, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+}
+
+static void
+bs_version(void)
+{
+ struct spdk_bs_super_block *super;
+ struct spdk_bs_dev *dev;
+ struct spdk_bs_opts opts;
+ spdk_blob_id blobid;
+
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+
+ /* Initialize a new blob store */
+ spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+
+ /* Unload the blob store */
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+
+ /*
+ * Change the bs version on disk. This will allow us to
+ * test that the version does not get modified automatically
+ * when loading and unloading the blobstore.
+ */
+ super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
+ CU_ASSERT(super->version == SPDK_BS_VERSION);
+ CU_ASSERT(super->clean == 1);
+ super->version = 2;
+ /*
+ * Version 2 metadata does not have a used blobid mask, so clear
+ * those fields in the super block and zero the corresponding
+ * region on "disk". We will use this to ensure blob IDs are
+ * correctly reconstructed.
+ */
+ memset(&g_dev_buffer[super->used_blobid_mask_start * SPDK_BS_PAGE_SIZE], 0,
+ super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE);
+ super->used_blobid_mask_start = 0;
+ super->used_blobid_mask_len = 0;
+ super->crc = _spdk_blob_md_page_calc_crc(super);
+
+ /* Load an existing blob store */
+ dev = init_dev();
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ CU_ASSERT(super->clean == 1);
+
+ /*
+ * Create a blob - just to make sure that when we unload it
+ * results in writing the super block (since metadata pages
+ * were allocated.
+ */
+ spdk_bs_create_blob(g_bs, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ /* Unload the blob store */
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+ CU_ASSERT(super->version == 2);
+ CU_ASSERT(super->used_blobid_mask_start == 0);
+ CU_ASSERT(super->used_blobid_mask_len == 0);
+
+ dev = init_dev();
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+
+ g_blob = NULL;
+ spdk_bs_open_blob(g_bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+
+ spdk_blob_close(g_blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+ CU_ASSERT(super->version == 2);
+ CU_ASSERT(super->used_blobid_mask_start == 0);
+ CU_ASSERT(super->used_blobid_mask_len == 0);
+}
+
+static void
+blob_set_xattrs(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_blob *blob;
+ struct spdk_blob_opts opts;
+ spdk_blob_id blobid;
+ const void *value;
+ size_t value_len;
+ int rc;
+
+ dev = init_dev();
+
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ /* Create blob with extra attributes */
+ spdk_blob_opts_init(&opts);
+
+ opts.xattrs.names = g_xattr_names;
+ opts.xattrs.get_value = _get_xattr_value;
+ opts.xattrs.count = 3;
+ opts.xattrs.ctx = &g_ctx;
+
+ spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ /* Get the xattrs */
+ value = NULL;
+
+ rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(value_len == strlen(g_xattr_values[0]));
+ CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
+
+ rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(value_len == strlen(g_xattr_values[1]));
+ CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
+
+ rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(value_len == strlen(g_xattr_values[2]));
+ CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
+
+ /* Try to get non existing attribute */
+
+ rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
+ CU_ASSERT(rc == -ENOENT);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ blob = NULL;
+ g_blob = NULL;
+ g_blobid = SPDK_BLOBID_INVALID;
+
+ /* NULL callback */
+ spdk_blob_opts_init(&opts);
+ opts.xattrs.names = g_xattr_names;
+ opts.xattrs.get_value = NULL;
+ opts.xattrs.count = 1;
+ opts.xattrs.ctx = &g_ctx;
+
+ spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == -EINVAL);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+
+ /* NULL values */
+ spdk_blob_opts_init(&opts);
+ opts.xattrs.names = g_xattr_names;
+ opts.xattrs.get_value = _get_xattr_value_null;
+ opts.xattrs.count = 1;
+ opts.xattrs.ctx = NULL;
+
+ spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == -EINVAL);
+
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+
+}
+
+static void
+blob_thin_prov_alloc(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_blob *blob;
+ struct spdk_blob_opts opts;
+ spdk_blob_id blobid;
+ uint64_t free_clusters;
+
+ dev = init_dev();
+
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+ free_clusters = spdk_bs_free_cluster_count(bs);
+
+ /* Set blob as thin provisioned */
+ spdk_blob_opts_init(&opts);
+ opts.thin_provision = true;
+
+ spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ CU_ASSERT(blob->active.num_clusters == 0);
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
+
+ /* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
+ spdk_blob_resize(blob, 5, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+ CU_ASSERT(blob->active.num_clusters == 5);
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
+
+ /* Grow it to 1TB - still unallocated */
+ spdk_blob_resize(blob, 262144, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+ CU_ASSERT(blob->active.num_clusters == 262144);
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144);
+
+ spdk_blob_sync_md(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ /* Sync must not change anything */
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+ CU_ASSERT(blob->active.num_clusters == 262144);
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144);
+ /* Since clusters are not allocated,
+ * number of metadata pages is expected to be minimal.
+ */
+ CU_ASSERT(blob->active.num_pages == 1);
+
+ /* Shrink the blob to 3 clusters - still unallocated */
+ spdk_blob_resize(blob, 3, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+ CU_ASSERT(blob->active.num_clusters == 3);
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3);
+
+ spdk_blob_sync_md(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ /* Sync must not change anything */
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+ CU_ASSERT(blob->active.num_clusters == 3);
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Unload the blob store */
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+ g_blob = NULL;
+ g_blobid = 0;
+
+ /* Load an existing blob store */
+ dev = init_dev();
+ spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+
+ bs = g_bs;
+
+ spdk_bs_open_blob(g_bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ /* Check that clusters allocation and size is still the same */
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+ CU_ASSERT(blob->active.num_clusters == 3);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+}
+
+static void
+blob_insert_cluster_msg(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_blob *blob;
+ struct spdk_blob_opts opts;
+ spdk_blob_id blobid;
+ uint64_t free_clusters;
+
+ dev = init_dev();
+
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+ free_clusters = spdk_bs_free_cluster_count(bs);
+
+ /* Set blob as thin provisioned */
+ spdk_blob_opts_init(&opts);
+ opts.thin_provision = true;
+ opts.num_clusters = 4;
+
+ spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ CU_ASSERT(blob->active.num_clusters == 4);
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 4);
+ CU_ASSERT(blob->active.clusters[1] == 0);
+
+ _spdk_bs_claim_cluster(bs, 0xF);
+ _spdk_blob_insert_cluster_on_md_thread(blob, 1, 0xF, blob_op_complete, NULL);
+
+ CU_ASSERT(blob->active.clusters[1] != 0);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Unload the blob store */
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+ g_blob = NULL;
+ g_blobid = 0;
+
+ /* Load an existing blob store */
+ dev = init_dev();
+ spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+
+ bs = g_bs;
+
+ spdk_bs_open_blob(g_bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ CU_ASSERT(blob->active.clusters[1] != 0);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+}
+
+static void
+blob_thin_prov_rw(void)
+{
+ static const uint8_t zero[10 * 4096] = { 0 };
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_blob *blob;
+ struct spdk_io_channel *channel;
+ struct spdk_blob_opts opts;
+ spdk_blob_id blobid;
+ uint64_t free_clusters;
+ uint64_t page_size;
+ uint8_t payload_read[10 * 4096];
+ uint8_t payload_write[10 * 4096];
+ uint64_t write_bytes;
+ uint64_t read_bytes;
+
+ dev = init_dev();
+
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+ free_clusters = spdk_bs_free_cluster_count(bs);
+ page_size = spdk_bs_get_page_size(bs);
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ CU_ASSERT(channel != NULL);
+
+ spdk_blob_opts_init(&opts);
+ opts.thin_provision = true;
+
+ spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ CU_ASSERT(blob->active.num_clusters == 0);
+
+ /* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
+ spdk_blob_resize(blob, 5, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+ CU_ASSERT(blob->active.num_clusters == 5);
+
+ spdk_blob_sync_md(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ /* Sync must not change anything */
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+ CU_ASSERT(blob->active.num_clusters == 5);
+
+ /* Payload should be all zeros from unallocated clusters */
+ memset(payload_read, 0xFF, sizeof(payload_read));
+ spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
+
+ write_bytes = g_dev_write_bytes;
+ read_bytes = g_dev_read_bytes;
+
+ memset(payload_write, 0xE5, sizeof(payload_write));
+ spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
+ /* For thin-provisioned blob we need to write 10 pages plus one page metadata and
+ * read 0 bytes */
+ CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11);
+ CU_ASSERT(g_dev_read_bytes - read_bytes == 0);
+
+ spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+
+ spdk_bs_free_io_channel(channel);
+
+ /* Unload the blob store */
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+ g_blob = NULL;
+ g_blobid = 0;
+}
+
+static void
+blob_thin_prov_rw_iov(void)
+{
+ static const uint8_t zero[10 * 4096] = { 0 };
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_blob *blob;
+ struct spdk_io_channel *channel;
+ struct spdk_blob_opts opts;
+ spdk_blob_id blobid;
+ uint64_t free_clusters;
+ uint8_t payload_read[10 * 4096];
+ uint8_t payload_write[10 * 4096];
+ struct iovec iov_read[3];
+ struct iovec iov_write[3];
+
+ dev = init_dev();
+
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+ free_clusters = spdk_bs_free_cluster_count(bs);
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ CU_ASSERT(channel != NULL);
+
+ spdk_blob_opts_init(&opts);
+ opts.thin_provision = true;
+
+ spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ CU_ASSERT(blob->active.num_clusters == 0);
+
+ /* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
+ spdk_blob_resize(blob, 5, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+ CU_ASSERT(blob->active.num_clusters == 5);
+
+ spdk_blob_sync_md(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ /* Sync must not change anything */
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+ CU_ASSERT(blob->active.num_clusters == 5);
+
+ /* Payload should be all zeros from unallocated clusters */
+ memset(payload_read, 0xAA, sizeof(payload_read));
+ iov_read[0].iov_base = payload_read;
+ iov_read[0].iov_len = 3 * 4096;
+ iov_read[1].iov_base = payload_read + 3 * 4096;
+ iov_read[1].iov_len = 4 * 4096;
+ iov_read[2].iov_base = payload_read + 7 * 4096;
+ iov_read[2].iov_len = 3 * 4096;
+ spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
+
+ memset(payload_write, 0xE5, sizeof(payload_write));
+ iov_write[0].iov_base = payload_write;
+ iov_write[0].iov_len = 1 * 4096;
+ iov_write[1].iov_base = payload_write + 1 * 4096;
+ iov_write[1].iov_len = 5 * 4096;
+ iov_write[2].iov_base = payload_write + 6 * 4096;
+ iov_write[2].iov_len = 4 * 4096;
+
+ spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ memset(payload_read, 0xAA, sizeof(payload_read));
+ iov_read[0].iov_base = payload_read;
+ iov_read[0].iov_len = 3 * 4096;
+ iov_read[1].iov_base = payload_read + 3 * 4096;
+ iov_read[1].iov_len = 4 * 4096;
+ iov_read[2].iov_base = payload_read + 7 * 4096;
+ iov_read[2].iov_len = 3 * 4096;
+ spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_free_io_channel(channel);
+
+ /* Unload the blob store */
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+ g_blob = NULL;
+ g_blobid = 0;
+}
+
+struct iter_ctx {
+ int current_iter;
+ spdk_blob_id blobid[4];
+};
+
+static void
+test_iter(void *arg, struct spdk_blob *blob, int bserrno)
+{
+ struct iter_ctx *iter_ctx = arg;
+ spdk_blob_id blobid;
+
+ CU_ASSERT(bserrno == 0);
+ blobid = spdk_blob_get_id(blob);
+ CU_ASSERT(blobid == iter_ctx->blobid[iter_ctx->current_iter++]);
+}
+
+static void
+bs_load_iter(void)
+{
+ struct spdk_bs_dev *dev;
+ struct iter_ctx iter_ctx = { 0 };
+ struct spdk_blob *blob;
+ int i, rc;
+ struct spdk_bs_opts opts;
+
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
+
+ /* Initialize a new blob store */
+ spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+
+ for (i = 0; i < 4; i++) {
+ g_bserrno = -1;
+ g_blobid = SPDK_BLOBID_INVALID;
+ spdk_bs_create_blob(g_bs, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ iter_ctx.blobid[i] = g_blobid;
+
+ g_bserrno = -1;
+ g_blob = NULL;
+ spdk_bs_open_blob(g_bs, g_blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob = g_blob;
+
+ /* Just save the blobid as an xattr for testing purposes. */
+ rc = spdk_blob_set_xattr(blob, "blobid", &g_blobid, sizeof(g_blobid));
+ CU_ASSERT(rc == 0);
+
+ /* Resize the blob */
+ spdk_blob_resize(blob, i, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ }
+
+ g_bserrno = -1;
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
+ opts.iter_cb_fn = test_iter;
+ opts.iter_cb_arg = &iter_ctx;
+
+ /* Test blob iteration during load after a clean shutdown. */
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+
+ /* Dirty shutdown */
+ _spdk_bs_free(g_bs);
+
+ dev = init_dev();
+ spdk_bs_opts_init(&opts);
+ snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
+ opts.iter_cb_fn = test_iter;
+ iter_ctx.current_iter = 0;
+ opts.iter_cb_arg = &iter_ctx;
+
+ /* Test blob iteration during load after a dirty shutdown. */
+ spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+}
+
+static void
+blob_snapshot_rw(void)
+{
+ static const uint8_t zero[10 * 4096] = { 0 };
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_blob *blob, *snapshot;
+ struct spdk_io_channel *channel;
+ struct spdk_blob_opts opts;
+ spdk_blob_id blobid, snapshotid;
+ uint64_t free_clusters;
+ uint64_t cluster_size;
+ uint64_t page_size;
+ uint8_t payload_read[10 * 4096];
+ uint8_t payload_write[10 * 4096];
+ uint64_t write_bytes;
+ uint64_t read_bytes;
+
+ dev = init_dev();
+
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+ free_clusters = spdk_bs_free_cluster_count(bs);
+ cluster_size = spdk_bs_get_cluster_size(bs);
+ page_size = spdk_bs_get_page_size(bs);
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ CU_ASSERT(channel != NULL);
+
+ spdk_blob_opts_init(&opts);
+ opts.thin_provision = true;
+ opts.num_clusters = 5;
+
+ spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
+
+ memset(payload_read, 0xFF, sizeof(payload_read));
+ spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
+
+ memset(payload_write, 0xE5, sizeof(payload_write));
+ spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
+
+ /* Create snapshot from blob */
+ spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ snapshotid = g_blobid;
+
+ spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot = g_blob;
+ CU_ASSERT(snapshot->data_ro == true)
+ CU_ASSERT(snapshot->md_ro == true)
+
+ CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5)
+
+ write_bytes = g_dev_write_bytes;
+ read_bytes = g_dev_read_bytes;
+
+ memset(payload_write, 0xAA, sizeof(payload_write));
+ spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
+
+ /* For a clone we need to allocate and copy one cluster, update one page of metadata
+ * and then write 10 pages of payload.
+ */
+ CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11 + cluster_size);
+ CU_ASSERT(g_dev_read_bytes - read_bytes == cluster_size);
+
+ spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
+
+ /* Data on snapshot should not change after write to clone */
+ memset(payload_write, 0xE5, sizeof(payload_write));
+ spdk_blob_io_read(snapshot, channel, payload_read, 4, 10, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_blob_close(snapshot, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_free_io_channel(channel);
+
+ /* Unload the blob store */
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+ g_blob = NULL;
+ g_blobid = 0;
+}
+
+static void
+blob_snapshot_rw_iov(void)
+{
+ static const uint8_t zero[10 * 4096] = { 0 };
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_blob *blob, *snapshot;
+ struct spdk_io_channel *channel;
+ struct spdk_blob_opts opts;
+ spdk_blob_id blobid, snapshotid;
+ uint64_t free_clusters;
+ uint8_t payload_read[10 * 4096];
+ uint8_t payload_write[10 * 4096];
+ struct iovec iov_read[3];
+ struct iovec iov_write[3];
+
+ dev = init_dev();
+
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+ free_clusters = spdk_bs_free_cluster_count(bs);
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ CU_ASSERT(channel != NULL);
+
+ spdk_blob_opts_init(&opts);
+ opts.thin_provision = true;
+ opts.num_clusters = 5;
+
+ spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
+
+ /* Create snapshot from blob */
+ spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ snapshotid = g_blobid;
+
+ spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot = g_blob;
+ CU_ASSERT(snapshot->data_ro == true)
+ CU_ASSERT(snapshot->md_ro == true)
+ CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);
+
+ /* Payload should be all zeros from unallocated clusters */
+ memset(payload_read, 0xAA, sizeof(payload_read));
+ iov_read[0].iov_base = payload_read;
+ iov_read[0].iov_len = 3 * 4096;
+ iov_read[1].iov_base = payload_read + 3 * 4096;
+ iov_read[1].iov_len = 4 * 4096;
+ iov_read[2].iov_base = payload_read + 7 * 4096;
+ iov_read[2].iov_len = 3 * 4096;
+ spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
+
+ memset(payload_write, 0xE5, sizeof(payload_write));
+ iov_write[0].iov_base = payload_write;
+ iov_write[0].iov_len = 1 * 4096;
+ iov_write[1].iov_base = payload_write + 1 * 4096;
+ iov_write[1].iov_len = 5 * 4096;
+ iov_write[2].iov_base = payload_write + 6 * 4096;
+ iov_write[2].iov_len = 4 * 4096;
+
+ spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ memset(payload_read, 0xAA, sizeof(payload_read));
+ iov_read[0].iov_base = payload_read;
+ iov_read[0].iov_len = 3 * 4096;
+ iov_read[1].iov_base = payload_read + 3 * 4096;
+ iov_read[1].iov_len = 4 * 4096;
+ iov_read[2].iov_base = payload_read + 7 * 4096;
+ iov_read[2].iov_len = 3 * 4096;
+ spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_blob_close(snapshot, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_free_io_channel(channel);
+
+ /* Unload the blob store */
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+ g_blob = NULL;
+ g_blobid = 0;
+}
+
+/**
+ * Inflate / decouple parent rw unit tests.
+ *
+ * --------------
+ * original blob: 0 1 2 3 4
+ * ,---------+---------+---------+---------+---------.
+ * snapshot |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx| - |
+ * +---------+---------+---------+---------+---------+
+ * snapshot2 | - |yyyyyyyyy| - |yyyyyyyyy| - |
+ * +---------+---------+---------+---------+---------+
+ * blob | - |zzzzzzzzz| - | - | - |
+ * '---------+---------+---------+---------+---------'
+ * . . . . . .
+ * -------- . . . . . .
+ * inflate: . . . . . .
+ * ,---------+---------+---------+---------+---------.
+ * blob |xxxxxxxxx|zzzzzzzzz|xxxxxxxxx|yyyyyyyyy|000000000|
+ * '---------+---------+---------+---------+---------'
+ *
+ * NOTE: needs to allocate 4 clusters, thin provisioning removed, dependency
+ * on snapshot2 and snapshot removed . . .
+ * . . . . . .
+ * ---------------- . . . . . .
+ * decouple parent: . . . . . .
+ * ,---------+---------+---------+---------+---------.
+ * snapshot |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx| - |
+ * +---------+---------+---------+---------+---------+
+ * blob | - |zzzzzzzzz| - |yyyyyyyyy| - |
+ * '---------+---------+---------+---------+---------'
+ *
+ * NOTE: needs to allocate 1 cluster, 3 clusters unallocated, dependency
+ * on snapshot2 removed and on snapshot still exists. Snapshot2
+ * should remain a clone of snapshot.
+ */
+static void
+_blob_inflate_rw(bool decouple_parent)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_blob *blob, *snapshot, *snapshot2;
+ struct spdk_io_channel *channel;
+ struct spdk_blob_opts opts;
+ spdk_blob_id blobid, snapshotid, snapshot2id;
+ uint64_t free_clusters;
+ uint64_t cluster_size;
+
+ uint64_t payload_size;
+ uint8_t *payload_read;
+ uint8_t *payload_write;
+ uint8_t *payload_clone;
+
+ uint64_t pages_per_cluster;
+ uint64_t pages_per_payload;
+
+ int i;
+ spdk_blob_id ids[2];
+ size_t count;
+
+ dev = init_dev();
+
+ spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ free_clusters = spdk_bs_free_cluster_count(bs);
+ cluster_size = spdk_bs_get_cluster_size(bs);
+ pages_per_cluster = cluster_size / spdk_bs_get_page_size(bs);
+ pages_per_payload = pages_per_cluster * 5;
+
+ payload_size = cluster_size * 5;
+
+ payload_read = malloc(payload_size);
+ SPDK_CU_ASSERT_FATAL(payload_read != NULL);
+
+ payload_write = malloc(payload_size);
+ SPDK_CU_ASSERT_FATAL(payload_write != NULL);
+
+ payload_clone = malloc(payload_size);
+ SPDK_CU_ASSERT_FATAL(payload_clone != NULL);
+
+ channel = spdk_bs_alloc_io_channel(bs);
+ SPDK_CU_ASSERT_FATAL(channel != NULL);
+
+ /* Create blob */
+ spdk_blob_opts_init(&opts);
+ opts.thin_provision = true;
+ opts.num_clusters = 5;
+
+ spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
+
+ /* 1) Initial read should return zeroed payload */
+ memset(payload_read, 0xFF, payload_size);
+ spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
+ blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
+
+ /* Fill whole blob with a pattern, except last cluster (to be sure it
+ * isn't allocated) */
+ memset(payload_write, 0xE5, payload_size - cluster_size);
+ spdk_blob_io_write(blob, channel, payload_write, 0, pages_per_payload -
+ pages_per_cluster, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
+
+ /* 2) Create snapshot from blob (first level) */
+ spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ snapshotid = g_blobid;
+
+ spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot = g_blob;
+ CU_ASSERT(snapshot->data_ro == true)
+ CU_ASSERT(snapshot->md_ro == true)
+
+ CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5)
+
+ /* Write every second cluster with a pattern.
+ *
+ * Last cluster shouldn't be written, to be sure that snapshot nor clone
+ * doesn't allocate it.
+ *
+ * payload_clone stores expected result on "blob" read at the time and
+ * is used only to check data consistency on clone before and after
+ * inflation. Initially we fill it with a backing snapshots pattern
+ * used before.
+ */
+ memset(payload_clone, 0xE5, payload_size - cluster_size);
+ memset(payload_clone + payload_size - cluster_size, 0x00, cluster_size);
+ memset(payload_write, 0xAA, payload_size);
+ for (i = 1; i < 5; i += 2) {
+ spdk_blob_io_write(blob, channel, payload_write, i * pages_per_cluster,
+ pages_per_cluster, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Update expected result */
+ memcpy(payload_clone + (cluster_size * i), payload_write,
+ cluster_size);
+ }
+ CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
+
+ /* Check data consistency on clone */
+ memset(payload_read, 0xFF, payload_size);
+ spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
+ blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);
+
+ /* 3) Create second levels snapshot from blob */
+ spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ snapshot2id = g_blobid;
+
+ spdk_bs_open_blob(bs, snapshot2id, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot2 = g_blob;
+ CU_ASSERT(snapshot2->data_ro == true)
+ CU_ASSERT(snapshot2->md_ro == true)
+
+ CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 5)
+
+ CU_ASSERT(snapshot2->parent_id == snapshotid);
+
+ /* Write one cluster on the top level blob. This cluster (1) covers
+ * already allocated cluster in the snapshot2, so shouldn't be inflated
+ * at all */
+ spdk_blob_io_write(blob, channel, payload_write, pages_per_cluster,
+ pages_per_cluster, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Update expected result */
+ memcpy(payload_clone + cluster_size, payload_write, cluster_size);
+
+ /* Check data consistency on clone */
+ memset(payload_read, 0xFF, payload_size);
+ spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
+ blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);
+
+
+ /* Close all blobs */
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_blob_close(snapshot2, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_blob_close(snapshot, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Check snapshot-clone relations */
+ count = 2;
+ CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
+ CU_ASSERT(count == 1);
+ CU_ASSERT(ids[0] == snapshot2id);
+
+ count = 2;
+ CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
+ CU_ASSERT(count == 1);
+ CU_ASSERT(ids[0] == blobid);
+
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshot2id);
+
+ free_clusters = spdk_bs_free_cluster_count(bs);
+ if (!decouple_parent) {
+ /* Do full blob inflation */
+ spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /* All clusters should be inflated (except one already allocated
+ * in a top level blob) */
+ CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 4);
+
+ /* Check if relation tree updated correctly */
+ count = 2;
+ CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
+
+ /* snapshotid have one clone */
+ CU_ASSERT(count == 1);
+ CU_ASSERT(ids[0] == snapshot2id);
+
+ /* snapshot2id have no clones */
+ count = 2;
+ CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
+ CU_ASSERT(count == 0);
+
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
+ } else {
+ /* Decouple parent of blob */
+ spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Only one cluster from a parent should be inflated (second one
+ * is covered by a cluster written on a top level blob, and
+ * already allocated) */
+ CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 1);
+
+ /* Check if relation tree updated correctly */
+ count = 2;
+ CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
+
+ /* snapshotid have two clones now */
+ CU_ASSERT(count == 2);
+ CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
+ CU_ASSERT(ids[0] == snapshot2id || ids[1] == snapshot2id);
+
+ /* snapshot2id have no clones */
+ count = 2;
+ CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
+ CU_ASSERT(count == 0);
+
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
+ }
+
+ /* Try to delete snapshot2 (should pass) */
+ spdk_bs_delete_blob(bs, snapshot2id, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Try to delete base snapshot (for decouple_parent should fail while
+ * dependency still exists) */
+ spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
+ CU_ASSERT(decouple_parent || g_bserrno == 0);
+ CU_ASSERT(!decouple_parent || g_bserrno != 0);
+
+ /* Reopen blob after snapshot deletion */
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
+
+ /* Check data consistency on inflated blob */
+ memset(payload_read, 0xFF, payload_size);
+ spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
+ blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_free_io_channel(channel);
+
+ /* Unload the blob store */
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+ g_blob = NULL;
+ g_blobid = 0;
+
+ free(payload_read);
+ free(payload_write);
+ free(payload_clone);
+}
+
+static void
+blob_inflate_rw(void)
+{
+ _blob_inflate_rw(false);
+ _blob_inflate_rw(true);
+}
+
+/**
+ * Snapshot-clones relation test
+ *
+ * snapshot
+ * |
+ * +-----+-----+
+ * | |
+ * blob(ro) snapshot2
+ * | |
+ * clone2 clone
+ */
+static void
+blob_relations(void)
+{
+ struct spdk_blob_store *bs;
+ struct spdk_bs_dev *dev;
+ struct spdk_bs_opts bs_opts;
+ struct spdk_blob_opts opts;
+ struct spdk_blob *blob, *snapshot, *snapshot2, *clone, *clone2;
+ spdk_blob_id blobid, cloneid, snapshotid, cloneid2, snapshotid2;
+ int rc;
+ size_t count;
+ spdk_blob_id ids[10] = {};
+
+ dev = init_dev();
+ spdk_bs_opts_init(&bs_opts);
+ snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
+
+ spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+ /* 1. Create blob with 10 clusters */
+
+ spdk_blob_opts_init(&opts);
+ opts.num_clusters = 10;
+
+ spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ blob = g_blob;
+
+ CU_ASSERT(!spdk_blob_is_read_only(blob));
+ CU_ASSERT(!spdk_blob_is_snapshot(blob));
+ CU_ASSERT(!spdk_blob_is_clone(blob));
+ CU_ASSERT(!spdk_blob_is_thin_provisioned(blob));
+
+ /* blob should not have underlying snapshot nor clones */
+ CU_ASSERT(blob->parent_id == SPDK_BLOBID_INVALID);
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, blobid, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 0);
+
+
+ /* 2. Create snapshot */
+
+ spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ snapshotid = g_blobid;
+
+ spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot = g_blob;
+
+ CU_ASSERT(spdk_blob_is_read_only(snapshot));
+ CU_ASSERT(spdk_blob_is_snapshot(snapshot));
+ CU_ASSERT(!spdk_blob_is_clone(snapshot));
+ CU_ASSERT(snapshot->parent_id == SPDK_BLOBID_INVALID);
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID);
+
+ /* Check if original blob is converted to the clone of snapshot */
+ CU_ASSERT(!spdk_blob_is_read_only(blob));
+ CU_ASSERT(!spdk_blob_is_snapshot(blob));
+ CU_ASSERT(spdk_blob_is_clone(blob));
+ CU_ASSERT(spdk_blob_is_thin_provisioned(blob));
+ CU_ASSERT(blob->parent_id == snapshotid);
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
+
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 1);
+ CU_ASSERT(ids[0] == blobid);
+
+
+ /* 3. Create clone from snapshot */
+
+ spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ cloneid = g_blobid;
+
+ spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ clone = g_blob;
+
+ CU_ASSERT(!spdk_blob_is_read_only(clone));
+ CU_ASSERT(!spdk_blob_is_snapshot(clone));
+ CU_ASSERT(spdk_blob_is_clone(clone));
+ CU_ASSERT(spdk_blob_is_thin_provisioned(clone));
+ CU_ASSERT(clone->parent_id == snapshotid);
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid);
+
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 0);
+
+ /* Check if clone is on the snapshot's list */
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
+ CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid);
+
+
+ /* 4. Create snapshot of the clone */
+
+ spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ snapshotid2 = g_blobid;
+
+ spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ snapshot2 = g_blob;
+
+ CU_ASSERT(spdk_blob_is_read_only(snapshot2));
+ CU_ASSERT(spdk_blob_is_snapshot(snapshot2));
+ CU_ASSERT(spdk_blob_is_clone(snapshot2));
+ CU_ASSERT(snapshot2->parent_id == snapshotid);
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid);
+
+ /* Check if clone is converted to the clone of snapshot2 and snapshot2
+ * is a child of snapshot */
+ CU_ASSERT(!spdk_blob_is_read_only(clone));
+ CU_ASSERT(!spdk_blob_is_snapshot(clone));
+ CU_ASSERT(spdk_blob_is_clone(clone));
+ CU_ASSERT(spdk_blob_is_thin_provisioned(clone));
+ CU_ASSERT(clone->parent_id == snapshotid2);
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
+
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 1);
+ CU_ASSERT(ids[0] == cloneid);
+
+
+ /* 5. Try to create clone from read only blob */
+
+ /* Mark blob as read only */
+ spdk_blob_set_read_only(blob);
+ spdk_blob_sync_md(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Check if previously created blob is read only clone */
+ CU_ASSERT(spdk_blob_is_read_only(blob));
+ CU_ASSERT(!spdk_blob_is_snapshot(blob));
+ CU_ASSERT(spdk_blob_is_clone(blob));
+ CU_ASSERT(spdk_blob_is_thin_provisioned(blob));
+
+ /* Create clone from read only blob */
+ spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ cloneid2 = g_blobid;
+
+ spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_blob != NULL);
+ clone2 = g_blob;
+
+ CU_ASSERT(!spdk_blob_is_read_only(clone2));
+ CU_ASSERT(!spdk_blob_is_snapshot(clone2));
+ CU_ASSERT(spdk_blob_is_clone(clone2));
+ CU_ASSERT(spdk_blob_is_thin_provisioned(clone2));
+
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
+
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, blobid, ids, &count);
+ CU_ASSERT(rc == 0);
+
+ CU_ASSERT(count == 1);
+ CU_ASSERT(ids[0] == cloneid2);
+
+ /* Close blobs */
+
+ spdk_blob_close(clone2, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_blob_close(clone, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_blob_close(snapshot, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_blob_close(snapshot2, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Try to delete snapshot with created clones */
+ spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno != 0);
+
+ spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno != 0);
+
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+
+ /* Load an existing blob store */
+ dev = init_dev();
+ snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
+
+ spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+ bs = g_bs;
+
+
+ /* NULL ids array should return number of clones in count */
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, snapshotid, NULL, &count);
+ CU_ASSERT(rc == -ENOMEM);
+ CU_ASSERT(count == 2);
+
+ /* incorrect array size */
+ count = 1;
+ rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
+ CU_ASSERT(rc == -ENOMEM);
+ CU_ASSERT(count == 2);
+
+
+ /* Verify structure of loaded blob store */
+
+ /* snapshot */
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID);
+
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 2);
+ CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
+ CU_ASSERT(ids[0] == snapshotid2 || ids[1] == snapshotid2);
+
+ /* blob */
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, blobid, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 1);
+ CU_ASSERT(ids[0] == cloneid2);
+
+ /* clone */
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 0);
+
+ /* snapshot2 */
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid);
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 1);
+ CU_ASSERT(ids[0] == cloneid);
+
+ /* clone2 */
+ CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
+ count = SPDK_COUNTOF(ids);
+ rc = spdk_blob_get_clones(bs, cloneid2, ids, &count);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(count == 0);
+
+ /* Try to delete all blobs in the worse possible order */
+
+ spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno != 0);
+
+ spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno != 0);
+
+ spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno != 0);
+
+ spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno != 0);
+
+ spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ spdk_bs_unload(bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ g_bs = NULL;
+}
+
+static void
+test_io_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
+{
+ uint8_t payload_ff[64 * 512];
+ uint8_t payload_aa[64 * 512];
+ uint8_t payload_00[64 * 512];
+ uint8_t *cluster0, *cluster1;
+
+ memset(payload_ff, 0xFF, sizeof(payload_ff));
+ memset(payload_aa, 0xAA, sizeof(payload_aa));
+ memset(payload_00, 0x00, sizeof(payload_00));
+
+ /* Try to perform I/O with io unit = 512 */
+ spdk_blob_io_write(blob, channel, payload_ff, 0, 1, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /* If thin provisioned is set cluster should be allocated now */
+ SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0);
+ cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
+
+ /* Each character 0-F symbolizes single io_unit containing 512 bytes block filled with that character.
+ * Each page is separated by |. Whole block [...] symbolizes one cluster (containing 4 pages). */
+ /* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
+ CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0);
+
+ /* Verify write with offset on first page */
+ spdk_blob_io_write(blob, channel, payload_ff, 2, 1, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
+ CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0);
+
+ /* Verify write with offset on first page */
+ spdk_blob_io_write(blob, channel, payload_ff, 4, 4, blob_op_complete, NULL);
+
+ /* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */
+ CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0);
+
+ /* Verify write with offset on second page */
+ spdk_blob_io_write(blob, channel, payload_ff, 8, 4, blob_op_complete, NULL);
+
+ /* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */
+ CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
+
+ /* Verify write across multiple pages */
+ spdk_blob_io_write(blob, channel, payload_aa, 4, 8, blob_op_complete, NULL);
+
+ /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */
+ CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
+
+ /* Verify write across multiple clusters */
+ spdk_blob_io_write(blob, channel, payload_ff, 28, 8, blob_op_complete, NULL);
+
+ SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
+ cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
+
+ /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
+ * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
+ CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
+
+ CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
+ CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0);
+
+ /* Verify write to second cluster */
+ spdk_blob_io_write(blob, channel, payload_ff, 32 + 12, 2, blob_op_complete, NULL);
+
+ SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
+ cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
+
+ /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
+ * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */
+ CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
+
+ CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
+ CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0);
+ CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0);
+ CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0);
+}
+
+static void
+test_io_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
+{
+ uint8_t payload_read[64 * 512];
+ uint8_t payload_ff[64 * 512];
+ uint8_t payload_aa[64 * 512];
+ uint8_t payload_00[64 * 512];
+
+ memset(payload_ff, 0xFF, sizeof(payload_ff));
+ memset(payload_aa, 0xAA, sizeof(payload_aa));
+ memset(payload_00, 0x00, sizeof(payload_00));
+
+ /* Read only first io unit */
+ /* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
+ * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
+ * payload_read: F000 0000 | 0000 0000 ... */
+ memset(payload_read, 0x00, sizeof(payload_read));
+ spdk_blob_io_read(blob, channel, payload_read, 0, 1, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0);
+
+ /* Read four io_units starting from offset = 2
+ * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
+ * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
+ * payload_read: F0AA 0000 | 0000 0000 ... */
+
+ memset(payload_read, 0x00, sizeof(payload_read));
+ spdk_blob_io_read(blob, channel, payload_read, 2, 4, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
+
+ /* Read eight io_units across multiple pages
+ * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
+ * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
+ * payload_read: AAAA AAAA | 0000 0000 ... */
+ memset(payload_read, 0x00, sizeof(payload_read));
+ spdk_blob_io_read(blob, channel, payload_read, 4, 8, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
+
+ /* Read eight io_units across multiple clusters
+ * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ]
+ * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
+ * payload_read: FFFF FFFF | 0000 0000 ... */
+ memset(payload_read, 0x00, sizeof(payload_read));
+ spdk_blob_io_read(blob, channel, payload_read, 28, 8, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
+
+ /* Read four io_units from second cluster
+ * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
+ * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ]
+ * payload_read: 00FF 0000 | 0000 0000 ... */
+ memset(payload_read, 0x00, sizeof(payload_read));
+ spdk_blob_io_read(blob, channel, payload_read, 32 + 10, 4, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
+
+ /* Read second cluster
+ * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
+ * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ]
+ * payload_read: FFFF 0000 | 0000 FF00 ... */
+ memset(payload_read, 0x00, sizeof(payload_read));
+ spdk_blob_io_read(blob, channel, payload_read, 32, 32, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0);
+
+ /* Read whole two clusters
+ * cluster0: [ (F0F0 AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
+ * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */
+ memset(payload_read, 0x00, sizeof(payload_read));
+ spdk_blob_io_read(blob, channel, payload_read, 0, 64, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0);
+
+ CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0);
+}
+
+
+static void
+test_io_unmap(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
+{
+ uint8_t payload_ff[64 * 512];
+ uint8_t payload_aa[64 * 512];
+ uint8_t payload_00[64 * 512];
+ uint8_t *cluster0, *cluster1;
+
+ memset(payload_ff, 0xFF, sizeof(payload_ff));
+ memset(payload_aa, 0xAA, sizeof(payload_aa));
+ memset(payload_00, 0x00, sizeof(payload_00));
+
+ cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
+ cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
+
+ /* Unmap */
+ spdk_blob_io_unmap(blob, channel, 0, 64, blob_op_complete, NULL);
+
+ CU_ASSERT(g_bserrno == 0);
+
+ CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0);
+ CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0);
+}
+
+static void
+test_io_zeroes(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
+{
+ uint8_t payload_ff[64 * 512];
+ uint8_t payload_aa[64 * 512];
+ uint8_t payload_00[64 * 512];
+ uint8_t *cluster0, *cluster1;
+
+ memset(payload_ff, 0xFF, sizeof(payload_ff));
+ memset(payload_aa, 0xAA, sizeof(payload_aa));
+ memset(payload_00, 0x00, sizeof(payload_00));
+
+ cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
+ cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
+
+ /* Write zeroes */
+ spdk_blob_io_write_zeroes(blob, channel, 0, 64, blob_op_complete, NULL);
+
+ CU_ASSERT(g_bserrno == 0);
+
+ CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0);
+ CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0);
+}
+
+
+static void
+test_iov_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
+{
+ uint8_t payload_ff[64 * 512];
+ uint8_t payload_aa[64 * 512];
+ uint8_t payload_00[64 * 512];
+ uint8_t *cluster0, *cluster1;
+ struct iovec iov[4];
+
+ memset(payload_ff, 0xFF, sizeof(payload_ff));
+ memset(payload_aa, 0xAA, sizeof(payload_aa));
+ memset(payload_00, 0x00, sizeof(payload_00));
+
+ /* Try to perform I/O with io unit = 512 */
+ iov[0].iov_base = payload_ff;
+ iov[0].iov_len = 1 * 512;
+ spdk_blob_io_writev(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /* If thin provisioned is set cluster should be allocated now */
+ SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0);
+ cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
+
+ /* Each character 0-F symbolizes single io_unit containing 512 bytes block filled with that character.
+ * Each page is separated by |. Whole block [...] symbolizes one cluster (containing 4 pages). */
+ /* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
+ CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0);
+
+ /* Verify write with offset on first page */
+ iov[0].iov_base = payload_ff;
+ iov[0].iov_len = 1 * 512;
+ spdk_blob_io_writev(blob, channel, iov, 1, 2, 1, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
+ CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0);
+
+ /* Verify write with offset on first page */
+ iov[0].iov_base = payload_ff;
+ iov[0].iov_len = 4 * 512;
+ spdk_blob_io_writev(blob, channel, iov, 1, 4, 4, blob_op_complete, NULL);
+
+ /* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */
+ CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0);
+
+ /* Verify write with offset on second page */
+ iov[0].iov_base = payload_ff;
+ iov[0].iov_len = 4 * 512;
+ spdk_blob_io_writev(blob, channel, iov, 1, 8, 4, blob_op_complete, NULL);
+
+ /* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */
+ CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
+
+ /* Verify write across multiple pages */
+ iov[0].iov_base = payload_aa;
+ iov[0].iov_len = 8 * 512;
+ spdk_blob_io_writev(blob, channel, iov, 1, 4, 8, blob_op_complete, NULL);
+
+ /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */
+ CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
+
+ /* Verify write across multiple clusters */
+
+ iov[0].iov_base = payload_ff;
+ iov[0].iov_len = 8 * 512;
+ spdk_blob_io_writev(blob, channel, iov, 1, 28, 8, blob_op_complete, NULL);
+
+ SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
+ cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
+
+ /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
+ * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
+ CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 16 * 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
+
+ CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
+ CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0);
+
+ /* Verify write to second cluster */
+
+ iov[0].iov_base = payload_ff;
+ iov[0].iov_len = 2 * 512;
+ spdk_blob_io_writev(blob, channel, iov, 1, 32 + 12, 2, blob_op_complete, NULL);
+
+ SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
+ cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
+
+ /* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
+ * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */
+ CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
+ CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
+
+ CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
+ CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0);
+ CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0);
+ CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0);
+}
+
+static void
+test_iov_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
+{
+ uint8_t payload_read[64 * 512];
+ uint8_t payload_ff[64 * 512];
+ uint8_t payload_aa[64 * 512];
+ uint8_t payload_00[64 * 512];
+ struct iovec iov[4];
+
+ memset(payload_ff, 0xFF, sizeof(payload_ff));
+ memset(payload_aa, 0xAA, sizeof(payload_aa));
+ memset(payload_00, 0x00, sizeof(payload_00));
+
+ /* Read only first io unit */
+ /* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
+ * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
+ * payload_read: F000 0000 | 0000 0000 ... */
+ memset(payload_read, 0x00, sizeof(payload_read));
+ iov[0].iov_base = payload_read;
+ iov[0].iov_len = 1 * 512;
+ spdk_blob_io_readv(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL);
+
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0);
+
+ /* Read four io_units starting from offset = 2
+ * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
+ * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
+ * payload_read: F0AA 0000 | 0000 0000 ... */
+
+ memset(payload_read, 0x00, sizeof(payload_read));
+ iov[0].iov_base = payload_read;
+ iov[0].iov_len = 4 * 512;
+ spdk_blob_io_readv(blob, channel, iov, 1, 2, 4, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
+
+ /* Read eight io_units across multiple pages
+ * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
+ * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
+ * payload_read: AAAA AAAA | 0000 0000 ... */
+ memset(payload_read, 0x00, sizeof(payload_read));
+ iov[0].iov_base = payload_read;
+ iov[0].iov_len = 4 * 512;
+ iov[1].iov_base = payload_read + 4 * 512;
+ iov[1].iov_len = 4 * 512;
+ spdk_blob_io_readv(blob, channel, iov, 2, 4, 8, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
+
+ /* Read eight io_units across multiple clusters
+ * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ]
+ * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
+ * payload_read: FFFF FFFF | 0000 0000 ... */
+ memset(payload_read, 0x00, sizeof(payload_read));
+ iov[0].iov_base = payload_read;
+ iov[0].iov_len = 2 * 512;
+ iov[1].iov_base = payload_read + 2 * 512;
+ iov[1].iov_len = 2 * 512;
+ iov[2].iov_base = payload_read + 4 * 512;
+ iov[2].iov_len = 2 * 512;
+ iov[3].iov_base = payload_read + 6 * 512;
+ iov[3].iov_len = 2 * 512;
+ spdk_blob_io_readv(blob, channel, iov, 4, 28, 8, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
+
+ /* Read four io_units from second cluster
+ * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
+ * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ]
+ * payload_read: 00FF 0000 | 0000 0000 ... */
+ memset(payload_read, 0x00, sizeof(payload_read));
+ iov[0].iov_base = payload_read;
+ iov[0].iov_len = 1 * 512;
+ iov[1].iov_base = payload_read + 1 * 512;
+ iov[1].iov_len = 3 * 512;
+ spdk_blob_io_readv(blob, channel, iov, 2, 32 + 10, 4, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
+
+ /* Read second cluster
+ * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
+ * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ]
+ * payload_read: FFFF 0000 | 0000 FF00 ... */
+ memset(payload_read, 0x00, sizeof(payload_read));
+ iov[0].iov_base = payload_read;
+ iov[0].iov_len = 1 * 512;
+ iov[1].iov_base = payload_read + 1 * 512;
+ iov[1].iov_len = 2 * 512;
+ iov[2].iov_base = payload_read + 3 * 512;
+ iov[2].iov_len = 4 * 512;
+ iov[3].iov_base = payload_read + 7 * 512;
+ iov[3].iov_len = 25 * 512;
+ spdk_blob_io_readv(blob, channel, iov, 4, 32, 32, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0);
+
+ /* Read whole two clusters
+ * cluster0: [ (F0F0 AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
+ * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */
+ memset(payload_read, 0x00, sizeof(payload_read));
+ iov[0].iov_base = payload_read;
+ iov[0].iov_len = 1 * 512;
+ iov[1].iov_base = payload_read + 1 * 512;
+ iov[1].iov_len = 8 * 512;
+ iov[2].iov_base = payload_read + 9 * 512;
+ iov[2].iov_len = 16 * 512;
+ iov[3].iov_base = payload_read + 25 * 512;
+ iov[3].iov_len = 39 * 512;
+ spdk_blob_io_readv(blob, channel, iov, 4, 0, 64, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0);
+
+ CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0);
+ CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0);
+}
+
+static void
+blob_io_unit(void)
+{
+ struct spdk_bs_opts bsopts;
+ struct spdk_blob_opts opts;
+ struct spdk_bs_dev *dev;
+ struct spdk_blob *blob, *snapshot, *clone;
+ spdk_blob_id blobid;
+ struct spdk_io_channel *channel;
+
+ /* Create dev with 512 bytes io unit size */
+
+ spdk_bs_opts_init(&bsopts);
+ bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4; // 8 * 4 = 32 io_unit
+ snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE");
+
+ /* Try to initialize a new blob store with unsupported io_unit */
+ dev = init_dev();
+ dev->blocklen = 512;
+ dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen;
+
+ /* Initialize a new blob store */
+ spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+
+ CU_ASSERT(spdk_bs_get_io_unit_size(g_bs) == 512);
+ channel = spdk_bs_alloc_io_channel(g_bs);
+
+ /* Create thick provisioned blob */
+ spdk_blob_opts_init(&opts);
+ opts.thin_provision = false;
+ opts.num_clusters = 32;
+
+ spdk_bs_create_blob_ext(g_bs, &opts, blob_op_with_id_complete, NULL);
+
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(g_bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob = g_blob;
+
+ test_io_write(dev, blob, channel);
+ test_io_read(dev, blob, channel);
+ test_io_zeroes(dev, blob, channel);
+
+ test_iov_write(dev, blob, channel);
+ test_iov_read(dev, blob, channel);
+
+ test_io_unmap(dev, blob, channel);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ blob = NULL;
+ g_blob = NULL;
+
+ /* Create thin provisioned blob */
+
+ spdk_blob_opts_init(&opts);
+ opts.thin_provision = true;
+ opts.num_clusters = 32;
+
+ spdk_bs_create_blob_ext(g_bs, &opts, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(g_bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ blob = g_blob;
+
+ test_io_write(dev, blob, channel);
+ test_io_read(dev, blob, channel);
+
+ test_io_zeroes(dev, blob, channel);
+
+ test_iov_write(dev, blob, channel);
+ test_iov_read(dev, blob, channel);
+
+ /* Create snapshot */
+
+ spdk_bs_create_snapshot(g_bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(g_bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ snapshot = g_blob;
+
+ spdk_bs_create_clone(g_bs, blobid, NULL, blob_op_with_id_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
+ blobid = g_blobid;
+
+ spdk_bs_open_blob(g_bs, blobid, blob_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ CU_ASSERT(g_blob != NULL);
+ clone = g_blob;
+
+ test_io_read(dev, blob, channel);
+ test_io_read(dev, snapshot, channel);
+ test_io_read(dev, clone, channel);
+
+ test_iov_read(dev, blob, channel);
+ test_iov_read(dev, snapshot, channel);
+ test_iov_read(dev, clone, channel);
+
+ /* Inflate clone */
+
+ spdk_bs_inflate_blob(g_bs, channel, blobid, blob_op_complete, NULL);
+
+ CU_ASSERT(g_bserrno == 0);
+
+ test_io_read(dev, clone, channel);
+
+ test_io_unmap(dev, clone, channel);
+
+ test_iov_write(dev, clone, channel);
+ test_iov_read(dev, clone, channel);
+
+ spdk_blob_close(blob, blob_op_complete, NULL);
+ spdk_blob_close(snapshot, blob_op_complete, NULL);
+ spdk_blob_close(clone, blob_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ blob = NULL;
+ g_blob = NULL;
+
+ /* Unload the blob store */
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ g_bs = NULL;
+ g_blob = NULL;
+ g_blobid = 0;
+}
+
+static void
+blob_io_unit_compatiblity(void)
+{
+ struct spdk_bs_opts bsopts;
+ struct spdk_bs_dev *dev;
+ struct spdk_bs_super_block *super;
+
+ /* Create dev with 512 bytes io unit size */
+
+ spdk_bs_opts_init(&bsopts);
+ bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4; // 8 * 4 = 32 io_unit
+ snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE");
+
+ /* Try to initialize a new blob store with unsupported io_unit */
+ dev = init_dev();
+ dev->blocklen = 512;
+ dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen;
+
+ /* Initialize a new blob store */
+ spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+
+ CU_ASSERT(spdk_bs_get_io_unit_size(g_bs) == 512);
+
+ /* Unload the blob store */
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ /* Modify super block to behave like older version.
+ * Check if loaded io unit size equals SPDK_BS_PAGE_SIZE */
+ super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
+ super->io_unit_size = 0;
+ super->crc = _spdk_blob_md_page_calc_crc(super);
+
+ dev = init_dev();
+ dev->blocklen = 512;
+ dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen;
+
+ spdk_bs_load(dev, &bsopts, bs_op_with_handle_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_bs != NULL);
+
+ CU_ASSERT(spdk_bs_get_io_unit_size(g_bs) == SPDK_BS_PAGE_SIZE);
+
+ /* Unload the blob store */
+ spdk_bs_unload(g_bs, bs_op_complete, NULL);
+ CU_ASSERT(g_bserrno == 0);
+
+ g_bs = NULL;
+ g_blob = NULL;
+ g_blobid = 0;
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("blob", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "blob_init", blob_init) == NULL ||
+ CU_add_test(suite, "blob_open", blob_open) == NULL ||
+ CU_add_test(suite, "blob_create", blob_create) == NULL ||
+ CU_add_test(suite, "blob_create_internal", blob_create_internal) == NULL ||
+ CU_add_test(suite, "blob_thin_provision", blob_thin_provision) == NULL ||
+ CU_add_test(suite, "blob_snapshot", blob_snapshot) == NULL ||
+ CU_add_test(suite, "blob_clone", blob_clone) == NULL ||
+ CU_add_test(suite, "blob_inflate", blob_inflate) == NULL ||
+ CU_add_test(suite, "blob_delete", blob_delete) == NULL ||
+ CU_add_test(suite, "blob_resize", blob_resize) == NULL ||
+ CU_add_test(suite, "blob_read_only", blob_read_only) == NULL ||
+ CU_add_test(suite, "channel_ops", channel_ops) == NULL ||
+ CU_add_test(suite, "blob_super", blob_super) == NULL ||
+ CU_add_test(suite, "blob_write", blob_write) == NULL ||
+ CU_add_test(suite, "blob_read", blob_read) == NULL ||
+ CU_add_test(suite, "blob_rw_verify", blob_rw_verify) == NULL ||
+ CU_add_test(suite, "blob_rw_verify_iov", blob_rw_verify_iov) == NULL ||
+ CU_add_test(suite, "blob_rw_verify_iov_nomem", blob_rw_verify_iov_nomem) == NULL ||
+ CU_add_test(suite, "blob_rw_iov_read_only", blob_rw_iov_read_only) == NULL ||
+ CU_add_test(suite, "blob_unmap", blob_unmap) == NULL ||
+ CU_add_test(suite, "blob_iter", blob_iter) == NULL ||
+ CU_add_test(suite, "blob_xattr", blob_xattr) == NULL ||
+ CU_add_test(suite, "bs_load", bs_load) == NULL ||
+ CU_add_test(suite, "bs_load_custom_cluster_size", bs_load_custom_cluster_size) == NULL ||
+ CU_add_test(suite, "bs_unload", bs_unload) == NULL ||
+ CU_add_test(suite, "bs_cluster_sz", bs_cluster_sz) == NULL ||
+ CU_add_test(suite, "bs_usable_clusters", bs_usable_clusters) == NULL ||
+ CU_add_test(suite, "bs_resize_md", bs_resize_md) == NULL ||
+ CU_add_test(suite, "bs_destroy", bs_destroy) == NULL ||
+ CU_add_test(suite, "bs_type", bs_type) == NULL ||
+ CU_add_test(suite, "bs_super_block", bs_super_block) == NULL ||
+ CU_add_test(suite, "blob_serialize", blob_serialize) == NULL ||
+ CU_add_test(suite, "blob_crc", blob_crc) == NULL ||
+ CU_add_test(suite, "super_block_crc", super_block_crc) == NULL ||
+ CU_add_test(suite, "blob_dirty_shutdown", blob_dirty_shutdown) == NULL ||
+ CU_add_test(suite, "blob_flags", blob_flags) == NULL ||
+ CU_add_test(suite, "bs_version", bs_version) == NULL ||
+ CU_add_test(suite, "blob_set_xattrs", blob_set_xattrs) == NULL ||
+ CU_add_test(suite, "blob_thin_prov_alloc", blob_thin_prov_alloc) == NULL ||
+ CU_add_test(suite, "blob_insert_cluster_msg", blob_insert_cluster_msg) == NULL ||
+ CU_add_test(suite, "blob_thin_prov_rw", blob_thin_prov_rw) == NULL ||
+ CU_add_test(suite, "blob_thin_prov_rw_iov", blob_thin_prov_rw_iov) == NULL ||
+ CU_add_test(suite, "bs_load_iter", bs_load_iter) == NULL ||
+ CU_add_test(suite, "blob_snapshot_rw", blob_snapshot_rw) == NULL ||
+ CU_add_test(suite, "blob_snapshot_rw_iov", blob_snapshot_rw_iov) == NULL ||
+ CU_add_test(suite, "blob_relations", blob_relations) == NULL ||
+ CU_add_test(suite, "blob_inflate_rw", blob_inflate_rw) == NULL ||
+ CU_add_test(suite, "blob_snapshot_freeze_io", blob_snapshot_freeze_io) == NULL ||
+ CU_add_test(suite, "blob_operation_split_rw", blob_operation_split_rw) == NULL ||
+ CU_add_test(suite, "blob_operation_split_rw_iov", blob_operation_split_rw_iov) == NULL ||
+ CU_add_test(suite, "blob_io_unit", blob_io_unit) == NULL ||
+ CU_add_test(suite, "blob_io_unit_compatiblity", blob_io_unit_compatiblity) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ g_dev_buffer = calloc(1, DEV_BUFFER_SIZE);
+ spdk_allocate_thread(_bs_send_msg, NULL, NULL, NULL, "thread0");
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ spdk_free_thread();
+ free(g_dev_buffer);
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/blob/bs_dev_common.c b/src/spdk/test/unit/lib/blob/bs_dev_common.c
new file mode 100644
index 00000000..fe310526
--- /dev/null
+++ b/src/spdk/test/unit/lib/blob/bs_dev_common.c
@@ -0,0 +1,225 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/thread.h"
+#include "bs_scheduler.c"
+
+
+#define DEV_BUFFER_SIZE (64 * 1024 * 1024)
+#define DEV_BUFFER_BLOCKLEN (4096)
+#define DEV_BUFFER_BLOCKCNT (DEV_BUFFER_SIZE / DEV_BUFFER_BLOCKLEN)
+uint8_t *g_dev_buffer;
+uint64_t g_dev_write_bytes;
+uint64_t g_dev_read_bytes;
+
+/* Define here for UT only. */
+struct spdk_io_channel g_io_channel;
+
+static struct spdk_io_channel *
+dev_create_channel(struct spdk_bs_dev *dev)
+{
+ return &g_io_channel;
+}
+
+static void
+dev_destroy_channel(struct spdk_bs_dev *dev, struct spdk_io_channel *channel)
+{
+}
+
+static void
+dev_destroy(struct spdk_bs_dev *dev)
+{
+ free(dev);
+}
+
+
+static void
+dev_complete_cb(void *arg)
+{
+ struct spdk_bs_dev_cb_args *cb_args = arg;
+
+ cb_args->cb_fn(cb_args->channel, cb_args->cb_arg, 0);
+}
+
+static void
+dev_complete(void *arg)
+{
+ _bs_send_msg(dev_complete_cb, arg, NULL);
+}
+
+static void
+dev_read(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, void *payload,
+ uint64_t lba, uint32_t lba_count,
+ struct spdk_bs_dev_cb_args *cb_args)
+{
+ uint64_t offset, length;
+
+ offset = lba * dev->blocklen;
+ length = lba_count * dev->blocklen;
+ SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
+ memcpy(payload, &g_dev_buffer[offset], length);
+ g_dev_read_bytes += length;
+ spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
+}
+
+static void
+dev_write(struct spdk_bs_dev *dev, struct spdk_io_channel *channel, void *payload,
+ uint64_t lba, uint32_t lba_count,
+ struct spdk_bs_dev_cb_args *cb_args)
+{
+ uint64_t offset, length;
+
+ offset = lba * dev->blocklen;
+ length = lba_count * dev->blocklen;
+ SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
+ memcpy(&g_dev_buffer[offset], payload, length);
+ g_dev_write_bytes += length;
+ spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
+}
+
+static void
+__check_iov(struct iovec *iov, int iovcnt, uint64_t length)
+{
+ int i;
+
+ for (i = 0; i < iovcnt; i++) {
+ length -= iov[i].iov_len;
+ }
+
+ CU_ASSERT(length == 0);
+}
+
+static void
+dev_readv(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
+ struct iovec *iov, int iovcnt,
+ uint64_t lba, uint32_t lba_count,
+ struct spdk_bs_dev_cb_args *cb_args)
+{
+ uint64_t offset, length;
+ int i;
+
+ offset = lba * dev->blocklen;
+ length = lba_count * dev->blocklen;
+ SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
+ __check_iov(iov, iovcnt, length);
+
+ for (i = 0; i < iovcnt; i++) {
+ memcpy(iov[i].iov_base, &g_dev_buffer[offset], iov[i].iov_len);
+ offset += iov[i].iov_len;
+ }
+
+ g_dev_read_bytes += length;
+ spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
+}
+
+static void
+dev_writev(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
+ struct iovec *iov, int iovcnt,
+ uint64_t lba, uint32_t lba_count,
+ struct spdk_bs_dev_cb_args *cb_args)
+{
+ uint64_t offset, length;
+ int i;
+
+ offset = lba * dev->blocklen;
+ length = lba_count * dev->blocklen;
+ SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
+ __check_iov(iov, iovcnt, length);
+
+ for (i = 0; i < iovcnt; i++) {
+ memcpy(&g_dev_buffer[offset], iov[i].iov_base, iov[i].iov_len);
+ offset += iov[i].iov_len;
+ }
+
+ g_dev_write_bytes += length;
+ spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
+}
+
+static void
+dev_flush(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
+ struct spdk_bs_dev_cb_args *cb_args)
+{
+ spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
+}
+
+static void
+dev_unmap(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
+ uint64_t lba, uint32_t lba_count,
+ struct spdk_bs_dev_cb_args *cb_args)
+{
+ uint64_t offset, length;
+
+ offset = lba * dev->blocklen;
+ length = lba_count * dev->blocklen;
+ SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
+ memset(&g_dev_buffer[offset], 0, length);
+ spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
+}
+
+static void
+dev_write_zeroes(struct spdk_bs_dev *dev, struct spdk_io_channel *channel,
+ uint64_t lba, uint32_t lba_count,
+ struct spdk_bs_dev_cb_args *cb_args)
+{
+ uint64_t offset, length;
+
+ offset = lba * dev->blocklen;
+ length = lba_count * dev->blocklen;
+ SPDK_CU_ASSERT_FATAL(offset + length <= DEV_BUFFER_SIZE);
+ memset(&g_dev_buffer[offset], 0, length);
+ g_dev_write_bytes += length;
+ spdk_thread_send_msg(spdk_get_thread(), dev_complete, cb_args);
+}
+
+static struct spdk_bs_dev *
+init_dev(void)
+{
+ struct spdk_bs_dev *dev = calloc(1, sizeof(*dev));
+
+ SPDK_CU_ASSERT_FATAL(dev != NULL);
+
+ dev->create_channel = dev_create_channel;
+ dev->destroy_channel = dev_destroy_channel;
+ dev->destroy = dev_destroy;
+ dev->read = dev_read;
+ dev->write = dev_write;
+ dev->readv = dev_readv;
+ dev->writev = dev_writev;
+ dev->flush = dev_flush;
+ dev->unmap = dev_unmap;
+ dev->write_zeroes = dev_write_zeroes;
+ dev->blockcnt = DEV_BUFFER_BLOCKCNT;
+ dev->blocklen = DEV_BUFFER_BLOCKLEN;
+
+ return dev;
+}
diff --git a/src/spdk/test/unit/lib/blob/bs_scheduler.c b/src/spdk/test/unit/lib/blob/bs_scheduler.c
new file mode 100644
index 00000000..76fa067e
--- /dev/null
+++ b/src/spdk/test/unit/lib/blob/bs_scheduler.c
@@ -0,0 +1,87 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+bool g_scheduler_delay = false;
+
+struct scheduled_ops {
+ spdk_thread_fn fn;
+ void *ctx;
+
+ TAILQ_ENTRY(scheduled_ops) ops_queue;
+};
+
+static TAILQ_HEAD(, scheduled_ops) g_scheduled_ops = TAILQ_HEAD_INITIALIZER(g_scheduled_ops);
+
+void _bs_flush_scheduler(uint32_t);
+
+static void
+_bs_send_msg(spdk_thread_fn fn, void *ctx, void *thread_ctx)
+{
+ if (g_scheduler_delay) {
+ struct scheduled_ops *ops = calloc(1, sizeof(*ops));
+
+ SPDK_CU_ASSERT_FATAL(ops != NULL);
+ ops->fn = fn;
+ ops->ctx = ctx;
+ TAILQ_INSERT_TAIL(&g_scheduled_ops, ops, ops_queue);
+
+ } else {
+ fn(ctx);
+ }
+}
+
+static void
+_bs_flush_scheduler_single(void)
+{
+ struct scheduled_ops *op;
+ TAILQ_HEAD(, scheduled_ops) ops;
+ TAILQ_INIT(&ops);
+
+ TAILQ_SWAP(&g_scheduled_ops, &ops, scheduled_ops, ops_queue);
+
+ while (!TAILQ_EMPTY(&ops)) {
+ op = TAILQ_FIRST(&ops);
+ TAILQ_REMOVE(&ops, op, ops_queue);
+
+ op->fn(op->ctx);
+ free(op);
+ }
+}
+
+void
+_bs_flush_scheduler(uint32_t n)
+{
+ while (n--) {
+ _bs_flush_scheduler_single();
+ }
+}
diff --git a/src/spdk/test/unit/lib/blobfs/Makefile b/src/spdk/test/unit/lib/blobfs/Makefile
new file mode 100644
index 00000000..dfb98f23
--- /dev/null
+++ b/src/spdk/test/unit/lib/blobfs/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = tree.c blobfs_async_ut blobfs_sync_ut
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/blobfs/blobfs_async_ut/.gitignore b/src/spdk/test/unit/lib/blobfs/blobfs_async_ut/.gitignore
new file mode 100644
index 00000000..aea3b021
--- /dev/null
+++ b/src/spdk/test/unit/lib/blobfs/blobfs_async_ut/.gitignore
@@ -0,0 +1 @@
+blobfs_async_ut
diff --git a/src/spdk/test/unit/lib/blobfs/blobfs_async_ut/Makefile b/src/spdk/test/unit/lib/blobfs/blobfs_async_ut/Makefile
new file mode 100644
index 00000000..e6dd0ce2
--- /dev/null
+++ b/src/spdk/test/unit/lib/blobfs/blobfs_async_ut/Makefile
@@ -0,0 +1,41 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+
+SPDK_LIB_LIST = blob
+TEST_FILE = blobfs_async_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/blobfs/blobfs_async_ut/blobfs_async_ut.c b/src/spdk/test/unit/lib/blobfs/blobfs_async_ut/blobfs_async_ut.c
new file mode 100644
index 00000000..baf4bc7f
--- /dev/null
+++ b/src/spdk/test/unit/lib/blobfs/blobfs_async_ut/blobfs_async_ut.c
@@ -0,0 +1,522 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "CUnit/Basic.h"
+
+#include "common/lib/test_env.c"
+
+#include "spdk_cunit.h"
+#include "blobfs/blobfs.c"
+#include "blobfs/tree.c"
+
+#include "unit/lib/blob/bs_dev_common.c"
+
+struct spdk_filesystem *g_fs;
+struct spdk_file *g_file;
+int g_fserrno;
+
+/* Return NULL to test hardcoded defaults. */
+struct spdk_conf_section *
+spdk_conf_find_section(struct spdk_conf *cp, const char *name)
+{
+ return NULL;
+}
+
+/* Return -1 to test hardcoded defaults. */
+int
+spdk_conf_section_get_intval(struct spdk_conf_section *sp, const char *key)
+{
+ return -1;
+}
+
+static void
+_fs_send_msg(spdk_thread_fn fn, void *ctx, void *thread_ctx)
+{
+ fn(ctx);
+}
+
+static void
+fs_op_complete(void *ctx, int fserrno)
+{
+ g_fserrno = fserrno;
+}
+
+static void
+fs_op_with_handle_complete(void *ctx, struct spdk_filesystem *fs, int fserrno)
+{
+ g_fs = fs;
+ g_fserrno = fserrno;
+}
+
+static void
+fs_init(void)
+{
+ struct spdk_filesystem *fs;
+ struct spdk_bs_dev *dev;
+
+ dev = init_dev();
+ spdk_allocate_thread(_fs_send_msg, NULL, NULL, NULL, "thread0");
+
+ spdk_fs_init(dev, NULL, NULL, fs_op_with_handle_complete, NULL);
+ SPDK_CU_ASSERT_FATAL(g_fs != NULL);
+ CU_ASSERT(g_fserrno == 0);
+ fs = g_fs;
+
+ g_fserrno = 1;
+ spdk_fs_unload(fs, fs_op_complete, NULL);
+ CU_ASSERT(g_fserrno == 0);
+
+ spdk_free_thread();
+}
+
+static void
+create_cb(void *ctx, int fserrno)
+{
+ g_fserrno = fserrno;
+}
+
+static void
+open_cb(void *ctx, struct spdk_file *f, int fserrno)
+{
+ g_fserrno = fserrno;
+ g_file = f;
+}
+
+static void
+delete_cb(void *ctx, int fserrno)
+{
+ g_fserrno = fserrno;
+}
+
+static void
+fs_open(void)
+{
+ struct spdk_filesystem *fs;
+ spdk_fs_iter iter;
+ struct spdk_bs_dev *dev;
+ struct spdk_file *file;
+ char name[257] = {'\0'};
+
+ dev = init_dev();
+ memset(name, 'a', sizeof(name) - 1);
+ spdk_allocate_thread(_fs_send_msg, NULL, NULL, NULL, "thread0");
+
+ spdk_fs_init(dev, NULL, NULL, fs_op_with_handle_complete, NULL);
+ SPDK_CU_ASSERT_FATAL(g_fs != NULL);
+ CU_ASSERT(g_fserrno == 0);
+ fs = g_fs;
+
+ g_fserrno = 0;
+ /* Open should fail, because the file name is too long. */
+ spdk_fs_open_file_async(fs, name, SPDK_BLOBFS_OPEN_CREATE, open_cb, NULL);
+ CU_ASSERT(g_fserrno == -ENAMETOOLONG);
+
+ g_fserrno = 0;
+ spdk_fs_open_file_async(fs, "file1", 0, open_cb, NULL);
+ CU_ASSERT(g_fserrno == -ENOENT);
+
+ g_file = NULL;
+ g_fserrno = 1;
+ spdk_fs_open_file_async(fs, "file1", SPDK_BLOBFS_OPEN_CREATE, open_cb, NULL);
+ CU_ASSERT(g_fserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_file != NULL);
+ CU_ASSERT(!strcmp("file1", g_file->name));
+ CU_ASSERT(g_file->ref_count == 1);
+
+ iter = spdk_fs_iter_first(fs);
+ CU_ASSERT(iter != NULL);
+ file = spdk_fs_iter_get_file(iter);
+ SPDK_CU_ASSERT_FATAL(file != NULL);
+ CU_ASSERT(!strcmp("file1", file->name));
+ iter = spdk_fs_iter_next(iter);
+ CU_ASSERT(iter == NULL);
+
+ g_fserrno = 0;
+ /* Delete should successful, we will mark the file as deleted. */
+ spdk_fs_delete_file_async(fs, "file1", delete_cb, NULL);
+ CU_ASSERT(g_fserrno == 0);
+ CU_ASSERT(!TAILQ_EMPTY(&fs->files));
+
+ g_fserrno = 1;
+ spdk_file_close_async(g_file, fs_op_complete, NULL);
+ CU_ASSERT(g_fserrno == 0);
+ CU_ASSERT(TAILQ_EMPTY(&fs->files));
+
+ g_fserrno = 1;
+ spdk_fs_unload(fs, fs_op_complete, NULL);
+ CU_ASSERT(g_fserrno == 0);
+
+ spdk_free_thread();
+}
+
+static void
+fs_create(void)
+{
+ struct spdk_filesystem *fs;
+ struct spdk_bs_dev *dev;
+ char name[257] = {'\0'};
+
+ dev = init_dev();
+ memset(name, 'a', sizeof(name) - 1);
+ spdk_allocate_thread(_fs_send_msg, NULL, NULL, NULL, "thread0");
+
+ spdk_fs_init(dev, NULL, NULL, fs_op_with_handle_complete, NULL);
+ SPDK_CU_ASSERT_FATAL(g_fs != NULL);
+ CU_ASSERT(g_fserrno == 0);
+ fs = g_fs;
+
+ g_fserrno = 0;
+ /* Create should fail, because the file name is too long. */
+ spdk_fs_create_file_async(fs, name, create_cb, NULL);
+ CU_ASSERT(g_fserrno == -ENAMETOOLONG);
+
+ g_fserrno = 1;
+ spdk_fs_create_file_async(fs, "file1", create_cb, NULL);
+ CU_ASSERT(g_fserrno == 0);
+
+ g_fserrno = 1;
+ spdk_fs_create_file_async(fs, "file1", create_cb, NULL);
+ CU_ASSERT(g_fserrno == -EEXIST);
+
+ g_fserrno = 1;
+ spdk_fs_delete_file_async(fs, "file1", delete_cb, NULL);
+ CU_ASSERT(g_fserrno == 0);
+ CU_ASSERT(TAILQ_EMPTY(&fs->files));
+
+ g_fserrno = 1;
+ spdk_fs_unload(fs, fs_op_complete, NULL);
+ CU_ASSERT(g_fserrno == 0);
+
+ spdk_free_thread();
+}
+
+static void
+fs_truncate(void)
+{
+ struct spdk_filesystem *fs;
+ struct spdk_bs_dev *dev;
+
+ dev = init_dev();
+ spdk_allocate_thread(_fs_send_msg, NULL, NULL, NULL, "thread0");
+
+ spdk_fs_init(dev, NULL, NULL, fs_op_with_handle_complete, NULL);
+ SPDK_CU_ASSERT_FATAL(g_fs != NULL);
+ CU_ASSERT(g_fserrno == 0);
+ fs = g_fs;
+
+ g_file = NULL;
+ g_fserrno = 1;
+ spdk_fs_open_file_async(fs, "file1", SPDK_BLOBFS_OPEN_CREATE, open_cb, NULL);
+ CU_ASSERT(g_fserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_file != NULL);
+
+ g_fserrno = 1;
+ spdk_file_truncate_async(g_file, 18 * 1024 * 1024 + 1, fs_op_complete, NULL);
+ CU_ASSERT(g_fserrno == 0);
+ CU_ASSERT(g_file->length == 18 * 1024 * 1024 + 1);
+
+ g_fserrno = 1;
+ spdk_file_truncate_async(g_file, 1, fs_op_complete, NULL);
+ CU_ASSERT(g_fserrno == 0);
+ CU_ASSERT(g_file->length == 1);
+
+ g_fserrno = 1;
+ spdk_file_truncate_async(g_file, 18 * 1024 * 1024 + 1, fs_op_complete, NULL);
+ CU_ASSERT(g_fserrno == 0);
+ CU_ASSERT(g_file->length == 18 * 1024 * 1024 + 1);
+
+ g_fserrno = 1;
+ spdk_file_close_async(g_file, fs_op_complete, NULL);
+ CU_ASSERT(g_fserrno == 0);
+ CU_ASSERT(g_file->ref_count == 0);
+
+ g_fserrno = 1;
+ spdk_fs_delete_file_async(fs, "file1", delete_cb, NULL);
+ CU_ASSERT(g_fserrno == 0);
+ CU_ASSERT(TAILQ_EMPTY(&fs->files));
+
+ g_fserrno = 1;
+ spdk_fs_unload(fs, fs_op_complete, NULL);
+ CU_ASSERT(g_fserrno == 0);
+
+ spdk_free_thread();
+}
+
+static void
+fs_rename(void)
+{
+ struct spdk_filesystem *fs;
+ struct spdk_file *file, *file2;
+ struct spdk_bs_dev *dev;
+
+ dev = init_dev();
+ spdk_allocate_thread(_fs_send_msg, NULL, NULL, NULL, "thread0");
+
+ spdk_fs_init(dev, NULL, NULL, fs_op_with_handle_complete, NULL);
+ SPDK_CU_ASSERT_FATAL(g_fs != NULL);
+ CU_ASSERT(g_fserrno == 0);
+ fs = g_fs;
+
+ g_fserrno = 1;
+ spdk_fs_create_file_async(fs, "file1", create_cb, NULL);
+ CU_ASSERT(g_fserrno == 0);
+
+ g_file = NULL;
+ g_fserrno = 1;
+ spdk_fs_open_file_async(fs, "file1", 0, open_cb, NULL);
+ CU_ASSERT(g_fserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_file != NULL);
+ CU_ASSERT(g_file->ref_count == 1);
+
+ file = g_file;
+ g_file = NULL;
+ g_fserrno = 1;
+ spdk_file_close_async(file, fs_op_complete, NULL);
+ CU_ASSERT(g_fserrno == 0);
+ SPDK_CU_ASSERT_FATAL(file->ref_count == 0);
+
+ g_file = NULL;
+ g_fserrno = 1;
+ spdk_fs_open_file_async(fs, "file2", SPDK_BLOBFS_OPEN_CREATE, open_cb, NULL);
+ CU_ASSERT(g_fserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_file != NULL);
+ CU_ASSERT(g_file->ref_count == 1);
+
+ file2 = g_file;
+ g_file = NULL;
+ g_fserrno = 1;
+ spdk_file_close_async(file2, fs_op_complete, NULL);
+ CU_ASSERT(g_fserrno == 0);
+ SPDK_CU_ASSERT_FATAL(file2->ref_count == 0);
+
+ /*
+ * Do a 3-way rename. This should delete the old "file2", then rename
+ * "file1" to "file2".
+ */
+ g_fserrno = 1;
+ spdk_fs_rename_file_async(fs, "file1", "file2", fs_op_complete, NULL);
+ CU_ASSERT(g_fserrno == 0);
+ CU_ASSERT(file->ref_count == 0);
+ CU_ASSERT(!strcmp(file->name, "file2"));
+ CU_ASSERT(TAILQ_FIRST(&fs->files) == file);
+ CU_ASSERT(TAILQ_NEXT(file, tailq) == NULL);
+
+ g_fserrno = 0;
+ spdk_fs_delete_file_async(fs, "file1", delete_cb, NULL);
+ CU_ASSERT(g_fserrno == -ENOENT);
+ CU_ASSERT(!TAILQ_EMPTY(&fs->files));
+
+ g_fserrno = 1;
+ spdk_fs_delete_file_async(fs, "file2", delete_cb, NULL);
+ CU_ASSERT(g_fserrno == 0);
+ CU_ASSERT(TAILQ_EMPTY(&fs->files));
+
+ g_fserrno = 1;
+ spdk_fs_unload(fs, fs_op_complete, NULL);
+ CU_ASSERT(g_fserrno == 0);
+
+ spdk_free_thread();
+}
+
+static void
+tree_find_buffer_ut(void)
+{
+ struct cache_tree *root;
+ struct cache_tree *level1_0;
+ struct cache_tree *level0_0_0;
+ struct cache_tree *level0_0_12;
+ struct cache_buffer *leaf_0_0_4;
+ struct cache_buffer *leaf_0_12_8;
+ struct cache_buffer *leaf_9_23_15;
+ struct cache_buffer *buffer;
+
+ level1_0 = calloc(1, sizeof(struct cache_tree));
+ SPDK_CU_ASSERT_FATAL(level1_0 != NULL);
+ level0_0_0 = calloc(1, sizeof(struct cache_tree));
+ SPDK_CU_ASSERT_FATAL(level0_0_0 != NULL);
+ level0_0_12 = calloc(1, sizeof(struct cache_tree));
+ SPDK_CU_ASSERT_FATAL(level0_0_12 != NULL);
+ leaf_0_0_4 = calloc(1, sizeof(struct cache_buffer));
+ SPDK_CU_ASSERT_FATAL(leaf_0_0_4 != NULL);
+ leaf_0_12_8 = calloc(1, sizeof(struct cache_buffer));
+ SPDK_CU_ASSERT_FATAL(leaf_0_12_8 != NULL);
+ leaf_9_23_15 = calloc(1, sizeof(struct cache_buffer));
+ SPDK_CU_ASSERT_FATAL(leaf_9_23_15 != NULL);
+
+ level1_0->level = 1;
+ level0_0_0->level = 0;
+ level0_0_12->level = 0;
+
+ leaf_0_0_4->offset = CACHE_BUFFER_SIZE * 4;
+ level0_0_0->u.buffer[4] = leaf_0_0_4;
+ level0_0_0->present_mask |= (1ULL << 4);
+
+ leaf_0_12_8->offset = CACHE_TREE_LEVEL_SIZE(1) * 12 + CACHE_BUFFER_SIZE * 8;
+ level0_0_12->u.buffer[8] = leaf_0_12_8;
+ level0_0_12->present_mask |= (1ULL << 8);
+
+ level1_0->u.tree[0] = level0_0_0;
+ level1_0->present_mask |= (1ULL << 0);
+ level1_0->u.tree[12] = level0_0_12;
+ level1_0->present_mask |= (1ULL << 12);
+
+ buffer = spdk_tree_find_buffer(NULL, 0);
+ CU_ASSERT(buffer == NULL);
+
+ buffer = spdk_tree_find_buffer(level0_0_0, 0);
+ CU_ASSERT(buffer == NULL);
+
+ buffer = spdk_tree_find_buffer(level0_0_0, CACHE_TREE_LEVEL_SIZE(0) + 1);
+ CU_ASSERT(buffer == NULL);
+
+ buffer = spdk_tree_find_buffer(level0_0_0, leaf_0_0_4->offset);
+ CU_ASSERT(buffer == leaf_0_0_4);
+
+ buffer = spdk_tree_find_buffer(level1_0, leaf_0_0_4->offset);
+ CU_ASSERT(buffer == leaf_0_0_4);
+
+ buffer = spdk_tree_find_buffer(level1_0, leaf_0_12_8->offset);
+ CU_ASSERT(buffer == leaf_0_12_8);
+
+ buffer = spdk_tree_find_buffer(level1_0, leaf_0_12_8->offset + CACHE_BUFFER_SIZE - 1);
+ CU_ASSERT(buffer == leaf_0_12_8);
+
+ buffer = spdk_tree_find_buffer(level1_0, leaf_0_12_8->offset - 1);
+ CU_ASSERT(buffer == NULL);
+
+ leaf_9_23_15->offset = CACHE_TREE_LEVEL_SIZE(2) * 9 +
+ CACHE_TREE_LEVEL_SIZE(1) * 23 +
+ CACHE_BUFFER_SIZE * 15;
+ root = spdk_tree_insert_buffer(level1_0, leaf_9_23_15);
+ CU_ASSERT(root != level1_0);
+ buffer = spdk_tree_find_buffer(root, leaf_9_23_15->offset);
+ CU_ASSERT(buffer == leaf_9_23_15);
+ spdk_tree_free_buffers(root);
+ free(root);
+}
+
+static void
+channel_ops(void)
+{
+ struct spdk_filesystem *fs;
+ struct spdk_bs_dev *dev;
+ struct spdk_io_channel *channel;
+
+ dev = init_dev();
+ spdk_allocate_thread(_fs_send_msg, NULL, NULL, NULL, "thread0");
+
+ spdk_fs_init(dev, NULL, NULL, fs_op_with_handle_complete, NULL);
+ SPDK_CU_ASSERT_FATAL(g_fs != NULL);
+ CU_ASSERT(g_fserrno == 0);
+ fs = g_fs;
+
+ channel = spdk_fs_alloc_io_channel(fs);
+ CU_ASSERT(channel != NULL);
+
+ spdk_fs_free_io_channel(channel);
+
+ g_fserrno = 1;
+ spdk_fs_unload(fs, fs_op_complete, NULL);
+ CU_ASSERT(g_fserrno == 0);
+ g_fs = NULL;
+
+ spdk_free_thread();
+}
+
+static void
+channel_ops_sync(void)
+{
+ struct spdk_filesystem *fs;
+ struct spdk_bs_dev *dev;
+ struct spdk_io_channel *channel;
+
+ dev = init_dev();
+ spdk_allocate_thread(_fs_send_msg, NULL, NULL, NULL, "thread0");
+
+ spdk_fs_init(dev, NULL, NULL, fs_op_with_handle_complete, NULL);
+ SPDK_CU_ASSERT_FATAL(g_fs != NULL);
+ CU_ASSERT(g_fserrno == 0);
+ fs = g_fs;
+
+ channel = spdk_fs_alloc_io_channel_sync(fs);
+ CU_ASSERT(channel != NULL);
+
+ spdk_fs_free_io_channel(channel);
+
+ g_fserrno = 1;
+ spdk_fs_unload(fs, fs_op_complete, NULL);
+ CU_ASSERT(g_fserrno == 0);
+ g_fs = NULL;
+
+ spdk_free_thread();
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("blobfs_async_ut", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "fs_init", fs_init) == NULL ||
+ CU_add_test(suite, "fs_open", fs_open) == NULL ||
+ CU_add_test(suite, "fs_create", fs_create) == NULL ||
+ CU_add_test(suite, "fs_truncate", fs_truncate) == NULL ||
+ CU_add_test(suite, "fs_rename", fs_rename) == NULL ||
+ CU_add_test(suite, "tree_find_buffer", tree_find_buffer_ut) == NULL ||
+ CU_add_test(suite, "channel_ops", channel_ops) == NULL ||
+ CU_add_test(suite, "channel_ops_sync", channel_ops_sync) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ g_dev_buffer = calloc(1, DEV_BUFFER_SIZE);
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ free(g_dev_buffer);
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/blobfs/blobfs_sync_ut/.gitignore b/src/spdk/test/unit/lib/blobfs/blobfs_sync_ut/.gitignore
new file mode 100644
index 00000000..93ef643f
--- /dev/null
+++ b/src/spdk/test/unit/lib/blobfs/blobfs_sync_ut/.gitignore
@@ -0,0 +1 @@
+blobfs_sync_ut
diff --git a/src/spdk/test/unit/lib/blobfs/blobfs_sync_ut/Makefile b/src/spdk/test/unit/lib/blobfs/blobfs_sync_ut/Makefile
new file mode 100644
index 00000000..28f38421
--- /dev/null
+++ b/src/spdk/test/unit/lib/blobfs/blobfs_sync_ut/Makefile
@@ -0,0 +1,41 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+
+SPDK_LIB_LIST = blob
+TEST_FILE = blobfs_sync_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/blobfs/blobfs_sync_ut/blobfs_sync_ut.c b/src/spdk/test/unit/lib/blobfs/blobfs_sync_ut/blobfs_sync_ut.c
new file mode 100644
index 00000000..140d99bd
--- /dev/null
+++ b/src/spdk/test/unit/lib/blobfs/blobfs_sync_ut/blobfs_sync_ut.c
@@ -0,0 +1,410 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk/blobfs.h"
+#include "spdk/env.h"
+#include "spdk/log.h"
+#include "spdk/thread.h"
+#include "spdk/barrier.h"
+
+#include "spdk_cunit.h"
+#include "unit/lib/blob/bs_dev_common.c"
+#include "common/lib/test_env.c"
+#include "blobfs/blobfs.c"
+#include "blobfs/tree.c"
+
+struct spdk_filesystem *g_fs;
+struct spdk_file *g_file;
+int g_fserrno;
+
+/* Return NULL to test hardcoded defaults. */
+struct spdk_conf_section *
+spdk_conf_find_section(struct spdk_conf *cp, const char *name)
+{
+ return NULL;
+}
+
+/* Return -1 to test hardcoded defaults. */
+int
+spdk_conf_section_get_intval(struct spdk_conf_section *sp, const char *key)
+{
+ return -1;
+}
+
+static void
+_fs_send_msg(spdk_thread_fn fn, void *ctx, void *thread_ctx)
+{
+ fn(ctx);
+}
+
+struct ut_request {
+ fs_request_fn fn;
+ void *arg;
+ volatile int done;
+ int from_ut;
+};
+
+static struct ut_request *g_req = NULL;
+static pthread_mutex_t g_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+static void
+send_request(fs_request_fn fn, void *arg)
+{
+ struct ut_request *req;
+
+ req = calloc(1, sizeof(*req));
+ assert(req != NULL);
+ req->fn = fn;
+ req->arg = arg;
+ req->done = 0;
+ req->from_ut = 0;
+
+ pthread_mutex_lock(&g_mutex);
+ g_req = req;
+ pthread_mutex_unlock(&g_mutex);
+}
+
+static void
+ut_send_request(fs_request_fn fn, void *arg)
+{
+ struct ut_request req;
+
+
+ req.fn = fn;
+ req.arg = arg;
+ req.done = 0;
+ req.from_ut = 1;
+
+ pthread_mutex_lock(&g_mutex);
+ g_req = &req;
+ pthread_mutex_unlock(&g_mutex);
+
+ while (1) {
+ pthread_mutex_lock(&g_mutex);
+ if (req.done == 1) {
+ pthread_mutex_unlock(&g_mutex);
+ break;
+ }
+ pthread_mutex_unlock(&g_mutex);
+ }
+
+ /*
+ * Make sure the address of the local req variable is not in g_req when we exit this
+ * function to make static analysis tools happy.
+ */
+ g_req = NULL;
+}
+
+static void
+fs_op_complete(void *ctx, int fserrno)
+{
+ g_fserrno = fserrno;
+}
+
+static void
+fs_op_with_handle_complete(void *ctx, struct spdk_filesystem *fs, int fserrno)
+{
+ g_fs = fs;
+ g_fserrno = fserrno;
+}
+
+static void
+_fs_init(void *arg)
+{
+ struct spdk_bs_dev *dev;
+
+ g_fs = NULL;
+ g_fserrno = -1;
+ dev = init_dev();
+ spdk_fs_init(dev, NULL, send_request, fs_op_with_handle_complete, NULL);
+ SPDK_CU_ASSERT_FATAL(g_fs != NULL);
+ CU_ASSERT(g_fserrno == 0);
+}
+
+static void
+_fs_unload(void *arg)
+{
+ g_fserrno = -1;
+ spdk_fs_unload(g_fs, fs_op_complete, NULL);
+ CU_ASSERT(g_fserrno == 0);
+ g_fs = NULL;
+}
+
+static void
+cache_write(void)
+{
+ uint64_t length;
+ int rc;
+ char buf[100];
+ struct spdk_io_channel *channel;
+
+ ut_send_request(_fs_init, NULL);
+
+ spdk_allocate_thread(_fs_send_msg, NULL, NULL, NULL, "thread0");
+ channel = spdk_fs_alloc_io_channel_sync(g_fs);
+
+ rc = spdk_fs_open_file(g_fs, channel, "testfile", SPDK_BLOBFS_OPEN_CREATE, &g_file);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_file != NULL);
+
+ length = (4 * 1024 * 1024);
+ rc = spdk_file_truncate(g_file, channel, length);
+ CU_ASSERT(rc == 0);
+
+ spdk_file_write(g_file, channel, buf, 0, sizeof(buf));
+
+ CU_ASSERT(spdk_file_get_length(g_file) == length);
+
+ rc = spdk_file_truncate(g_file, channel, sizeof(buf));
+ CU_ASSERT(rc == 0);
+
+ spdk_file_close(g_file, channel);
+ rc = spdk_fs_delete_file(g_fs, channel, "testfile");
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_fs_delete_file(g_fs, channel, "testfile");
+ CU_ASSERT(rc == -ENOENT);
+
+ spdk_fs_free_io_channel(channel);
+ spdk_free_thread();
+
+ ut_send_request(_fs_unload, NULL);
+}
+
+static void
+cache_write_null_buffer(void)
+{
+ uint64_t length;
+ int rc;
+ struct spdk_io_channel *channel;
+
+ ut_send_request(_fs_init, NULL);
+
+ spdk_allocate_thread(_fs_send_msg, NULL, NULL, NULL, "thread0");
+ channel = spdk_fs_alloc_io_channel_sync(g_fs);
+
+ rc = spdk_fs_open_file(g_fs, channel, "testfile", SPDK_BLOBFS_OPEN_CREATE, &g_file);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_file != NULL);
+
+ length = 0;
+ rc = spdk_file_truncate(g_file, channel, length);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_file_write(g_file, channel, NULL, 0, 0);
+ CU_ASSERT(rc == 0);
+
+ spdk_file_close(g_file, channel);
+ rc = spdk_fs_delete_file(g_fs, channel, "testfile");
+ CU_ASSERT(rc == 0);
+
+ spdk_fs_free_io_channel(channel);
+ spdk_free_thread();
+
+ ut_send_request(_fs_unload, NULL);
+}
+
+static void
+fs_create_sync(void)
+{
+ int rc;
+ struct spdk_io_channel *channel;
+
+ ut_send_request(_fs_init, NULL);
+
+ spdk_allocate_thread(_fs_send_msg, NULL, NULL, NULL, "thread0");
+ channel = spdk_fs_alloc_io_channel_sync(g_fs);
+ CU_ASSERT(channel != NULL);
+
+ rc = spdk_fs_create_file(g_fs, channel, "testfile");
+ CU_ASSERT(rc == 0);
+
+ /* Create should fail, because the file already exists. */
+ rc = spdk_fs_create_file(g_fs, channel, "testfile");
+ CU_ASSERT(rc != 0);
+
+ rc = spdk_fs_delete_file(g_fs, channel, "testfile");
+ CU_ASSERT(rc == 0);
+
+ spdk_fs_free_io_channel(channel);
+ spdk_free_thread();
+
+ ut_send_request(_fs_unload, NULL);
+}
+
+static void
+cache_append_no_cache(void)
+{
+ int rc;
+ char buf[100];
+ struct spdk_io_channel *channel;
+
+ ut_send_request(_fs_init, NULL);
+
+ spdk_allocate_thread(_fs_send_msg, NULL, NULL, NULL, "thread0");
+ channel = spdk_fs_alloc_io_channel_sync(g_fs);
+
+ rc = spdk_fs_open_file(g_fs, channel, "testfile", SPDK_BLOBFS_OPEN_CREATE, &g_file);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_file != NULL);
+
+ spdk_file_write(g_file, channel, buf, 0 * sizeof(buf), sizeof(buf));
+ CU_ASSERT(spdk_file_get_length(g_file) == 1 * sizeof(buf));
+ spdk_file_write(g_file, channel, buf, 1 * sizeof(buf), sizeof(buf));
+ CU_ASSERT(spdk_file_get_length(g_file) == 2 * sizeof(buf));
+ spdk_file_sync(g_file, channel);
+ cache_free_buffers(g_file);
+ spdk_file_write(g_file, channel, buf, 2 * sizeof(buf), sizeof(buf));
+ CU_ASSERT(spdk_file_get_length(g_file) == 3 * sizeof(buf));
+ spdk_file_write(g_file, channel, buf, 3 * sizeof(buf), sizeof(buf));
+ CU_ASSERT(spdk_file_get_length(g_file) == 4 * sizeof(buf));
+ spdk_file_write(g_file, channel, buf, 4 * sizeof(buf), sizeof(buf));
+ CU_ASSERT(spdk_file_get_length(g_file) == 5 * sizeof(buf));
+
+ spdk_file_close(g_file, channel);
+ rc = spdk_fs_delete_file(g_fs, channel, "testfile");
+ CU_ASSERT(rc == 0);
+
+ spdk_fs_free_io_channel(channel);
+ spdk_free_thread();
+
+ ut_send_request(_fs_unload, NULL);
+}
+
+static void
+fs_delete_file_without_close(void)
+{
+ int rc;
+ struct spdk_io_channel *channel;
+ struct spdk_file *file;
+
+ ut_send_request(_fs_init, NULL);
+ spdk_allocate_thread(_fs_send_msg, NULL, NULL, NULL, "thread0");
+ channel = spdk_fs_alloc_io_channel_sync(g_fs);
+ CU_ASSERT(channel != NULL);
+
+ rc = spdk_fs_open_file(g_fs, channel, "testfile", SPDK_BLOBFS_OPEN_CREATE, &g_file);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_file != NULL);
+
+ rc = spdk_fs_delete_file(g_fs, channel, "testfile");
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_file->ref_count != 0);
+ CU_ASSERT(g_file->is_deleted == true);
+
+ rc = spdk_fs_open_file(g_fs, channel, "testfile", 0, &file);
+ CU_ASSERT(rc != 0);
+
+ spdk_file_close(g_file, channel);
+
+ rc = spdk_fs_open_file(g_fs, channel, "testfile", 0, &file);
+ CU_ASSERT(rc != 0);
+
+ spdk_fs_free_io_channel(channel);
+ spdk_free_thread();
+
+ ut_send_request(_fs_unload, NULL);
+
+}
+
+static void
+terminate_spdk_thread(void *arg)
+{
+ spdk_free_thread();
+ pthread_exit(NULL);
+}
+
+static void *
+spdk_thread(void *arg)
+{
+ struct ut_request *req;
+
+ spdk_allocate_thread(_fs_send_msg, NULL, NULL, NULL, "thread0");
+
+ while (1) {
+ pthread_mutex_lock(&g_mutex);
+ if (g_req != NULL) {
+ req = g_req;
+ req->fn(req->arg);
+ req->done = 1;
+ if (!req->from_ut) {
+ free(req);
+ }
+ g_req = NULL;
+ }
+ pthread_mutex_unlock(&g_mutex);
+ }
+
+ return NULL;
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ pthread_t spdk_tid;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("blobfs_sync_ut", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "write", cache_write) == NULL ||
+ CU_add_test(suite, "write_null_buffer", cache_write_null_buffer) == NULL ||
+ CU_add_test(suite, "create_sync", fs_create_sync) == NULL ||
+ CU_add_test(suite, "append_no_cache", cache_append_no_cache) == NULL ||
+ CU_add_test(suite, "delete_file_without_close", fs_delete_file_without_close) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ pthread_create(&spdk_tid, NULL, spdk_thread, NULL);
+ g_dev_buffer = calloc(1, DEV_BUFFER_SIZE);
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ free(g_dev_buffer);
+ send_request(terminate_spdk_thread, NULL);
+ pthread_join(spdk_tid, NULL);
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/blobfs/tree.c/.gitignore b/src/spdk/test/unit/lib/blobfs/tree.c/.gitignore
new file mode 100644
index 00000000..57e77bf7
--- /dev/null
+++ b/src/spdk/test/unit/lib/blobfs/tree.c/.gitignore
@@ -0,0 +1 @@
+tree_ut
diff --git a/src/spdk/test/unit/lib/blobfs/tree.c/Makefile b/src/spdk/test/unit/lib/blobfs/tree.c/Makefile
new file mode 100644
index 00000000..64bc202a
--- /dev/null
+++ b/src/spdk/test/unit/lib/blobfs/tree.c/Makefile
@@ -0,0 +1,40 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+
+TEST_FILE = tree_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/blobfs/tree.c/tree_ut.c b/src/spdk/test/unit/lib/blobfs/tree.c/tree_ut.c
new file mode 100644
index 00000000..c24aaa78
--- /dev/null
+++ b/src/spdk/test/unit/lib/blobfs/tree.c/tree_ut.c
@@ -0,0 +1,159 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "blobfs/tree.c"
+
+void
+spdk_cache_buffer_free(struct cache_buffer *cache_buffer)
+{
+ free(cache_buffer);
+}
+
+static void
+blobfs_tree_op_test(void)
+{
+ struct cache_tree *tree;
+ struct cache_buffer *buffer[5];
+ struct cache_buffer *tmp_buffer;
+ int i;
+
+ for (i = 0; i < 5; i ++) {
+ buffer[i] = calloc(1, sizeof(struct cache_buffer));
+ SPDK_CU_ASSERT_FATAL(buffer[i]);
+ }
+
+ tree = calloc(1, sizeof(*tree));
+ SPDK_CU_ASSERT_FATAL(tree != NULL);
+
+ /* insert buffer[0] */
+ buffer[0]->offset = 0;
+ tree = spdk_tree_insert_buffer(tree, buffer[0]);
+ SPDK_CU_ASSERT_FATAL(tree != NULL);
+ CU_ASSERT(tree->level == 0);
+ tmp_buffer = spdk_tree_find_buffer(tree, buffer[0]->offset);
+ CU_ASSERT(tmp_buffer == buffer[0]);
+
+ /* insert buffer[1] */
+ buffer[1]->offset = CACHE_BUFFER_SIZE;
+ /* set the bytes_filled equal = bytes_filled with same non zero value, e.g., 32 */
+ buffer[1]->bytes_filled = buffer[1]->bytes_flushed = 32;
+ tree = spdk_tree_insert_buffer(tree, buffer[1]);
+ SPDK_CU_ASSERT_FATAL(tree != NULL);
+ CU_ASSERT(tree->level == 0);
+ tmp_buffer = spdk_tree_find_filled_buffer(tree, buffer[1]->offset);
+ CU_ASSERT(tmp_buffer == buffer[1]);
+
+ /* insert buffer[2] */
+ buffer[2]->offset = (CACHE_TREE_WIDTH - 1) * CACHE_BUFFER_SIZE;
+ tree = spdk_tree_insert_buffer(tree, buffer[2]);
+ SPDK_CU_ASSERT_FATAL(tree != NULL);
+ CU_ASSERT(tree->level == 0);
+ tmp_buffer = spdk_tree_find_buffer(tree, buffer[2]->offset);
+ CU_ASSERT(tmp_buffer == buffer[2]);
+ tmp_buffer = spdk_tree_find_filled_buffer(tree, buffer[2]->offset);
+ CU_ASSERT(tmp_buffer == NULL);
+
+ /* insert buffer[3], set an offset which can not be fit level 0 */
+ buffer[3]->offset = CACHE_TREE_LEVEL_SIZE(1);
+ tree = spdk_tree_insert_buffer(tree, buffer[3]);
+ SPDK_CU_ASSERT_FATAL(tree != NULL);
+ CU_ASSERT(tree->level == 1);
+ tmp_buffer = spdk_tree_find_buffer(tree, buffer[3]->offset);
+ CU_ASSERT(tmp_buffer == buffer[3]);
+
+ /* insert buffer[4], set an offset which can not be fit level 1 */
+ buffer[4]->offset = CACHE_TREE_LEVEL_SIZE(2);
+ tree = spdk_tree_insert_buffer(tree, buffer[4]);
+ SPDK_CU_ASSERT_FATAL(tree != NULL);
+ CU_ASSERT(tree->level == 2);
+ tmp_buffer = spdk_tree_find_buffer(tree, buffer[4]->offset);
+ CU_ASSERT(tmp_buffer == buffer[4]);
+
+ /* delete buffer[0] */
+ spdk_tree_remove_buffer(tree, buffer[0]);
+ /* check whether buffer[0] is still existed or not */
+ tmp_buffer = spdk_tree_find_buffer(tree, 0);
+ CU_ASSERT(tmp_buffer == NULL);
+
+ /* delete buffer[3] */
+ spdk_tree_remove_buffer(tree, buffer[3]);
+ /* check whether buffer[3] is still existed or not */
+ tmp_buffer = spdk_tree_find_buffer(tree, CACHE_TREE_LEVEL_SIZE(1));
+ CU_ASSERT(tmp_buffer == NULL);
+
+ /* free all buffers in the tree */
+ spdk_tree_free_buffers(tree);
+
+ /* check whether buffer[1] is still existed or not */
+ tmp_buffer = spdk_tree_find_buffer(tree, CACHE_BUFFER_SIZE);
+ CU_ASSERT(tmp_buffer == NULL);
+ /* check whether buffer[2] is still existed or not */
+ tmp_buffer = spdk_tree_find_buffer(tree, (CACHE_TREE_WIDTH - 1) * CACHE_BUFFER_SIZE);
+ CU_ASSERT(tmp_buffer == NULL);
+ /* check whether buffer[4] is still existed or not */
+ tmp_buffer = spdk_tree_find_buffer(tree, CACHE_TREE_LEVEL_SIZE(2));
+ CU_ASSERT(tmp_buffer == NULL);
+
+ /* According to spdk_tree_free_buffers, root will not be freed */
+ free(tree);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("tree", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (CU_add_test(suite, "blobfs_tree_op_test", blobfs_tree_op_test) == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/event/Makefile b/src/spdk/test/unit/lib/event/Makefile
new file mode 100644
index 00000000..a6629af9
--- /dev/null
+++ b/src/spdk/test/unit/lib/event/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = subsystem.c app.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/event/app.c/.gitignore b/src/spdk/test/unit/lib/event/app.c/.gitignore
new file mode 100644
index 00000000..123e1673
--- /dev/null
+++ b/src/spdk/test/unit/lib/event/app.c/.gitignore
@@ -0,0 +1 @@
+app_ut
diff --git a/src/spdk/test/unit/lib/event/app.c/Makefile b/src/spdk/test/unit/lib/event/app.c/Makefile
new file mode 100644
index 00000000..cce3ad86
--- /dev/null
+++ b/src/spdk/test/unit/lib/event/app.c/Makefile
@@ -0,0 +1,42 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.mock.unittest.mk
+
+SPDK_LIB_LIST = conf trace jsonrpc json
+TEST_FILE = app_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/event/app.c/app_ut.c b/src/spdk/test/unit/lib/event/app.c/app_ut.c
new file mode 100644
index 00000000..7d549261
--- /dev/null
+++ b/src/spdk/test/unit/lib/event/app.c/app_ut.c
@@ -0,0 +1,195 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+#include "common/lib/test_env.c"
+#include "event/app.c"
+
+#define test_argc 6
+
+DEFINE_STUB_V(spdk_rpc_initialize, (const char *listen_addr));
+DEFINE_STUB_V(spdk_rpc_finish, (void));
+DEFINE_STUB_V(spdk_event_call, (struct spdk_event *event));
+DEFINE_STUB_V(spdk_reactors_start, (void));
+DEFINE_STUB_V(spdk_reactors_stop, (void *arg1, void *arg2));
+DEFINE_STUB(spdk_reactors_init, int, (unsigned int max_delay_us), 0);
+DEFINE_STUB_V(spdk_reactors_fini, (void));
+DEFINE_STUB(spdk_event_allocate, struct spdk_event *, (uint32_t core, spdk_event_fn fn, void *arg1,
+ void *arg2), NULL);
+DEFINE_STUB(spdk_env_get_current_core, uint32_t, (void), 0);
+DEFINE_STUB(spdk_app_get_core_mask, struct spdk_cpuset *, (void), NULL);
+DEFINE_STUB_V(spdk_subsystem_config, (FILE *fp));
+DEFINE_STUB_V(spdk_subsystem_init, (struct spdk_event *app_start_event));
+DEFINE_STUB_V(spdk_subsystem_fini, (struct spdk_event *app_stop_event));
+DEFINE_STUB(spdk_env_init, int, (const struct spdk_env_opts *opts), 0);
+DEFINE_STUB_V(spdk_env_opts_init, (struct spdk_env_opts *opts));
+DEFINE_STUB(spdk_env_get_core_count, uint32_t, (void), 1);
+DEFINE_STUB_V(spdk_rpc_register_method, (const char *method, spdk_rpc_method_handler func,
+ uint32_t state_mask));
+DEFINE_STUB_V(spdk_rpc_set_state, (uint32_t state));
+
+
+static void
+unittest_usage(void)
+{
+}
+
+static void
+unittest_parse_args(int ch, char *arg)
+{
+}
+
+static void
+test_spdk_app_parse_args(void)
+{
+ spdk_app_parse_args_rvals_t rc;
+ struct spdk_app_opts opts = {};
+ struct option my_options[2] = {};
+ char *valid_argv[test_argc] = {"app_ut",
+ "--wait-for-rpc",
+ "-d",
+ "-p0",
+ "-B",
+ "0000:81:00.0"
+ };
+ char *invalid_argv_BW[test_argc] = {"app_ut",
+ "-B",
+ "0000:81:00.0",
+ "-W",
+ "0000:82:00.0",
+ "-cspdk.conf"
+ };
+ /* currently use -z as our new option */
+ char *argv_added_short_opt[test_argc] = {"app_ut",
+ "-z",
+ "-d",
+ "--wait-for-rpc",
+ "-p0",
+ "-cspdk.conf"
+ };
+ char *argv_added_long_opt[test_argc] = {"app_ut",
+ "-cspdk.conf",
+ "-d",
+ "-r/var/tmp/spdk.sock",
+ "--test-long-opt",
+ "--wait-for-rpc"
+ };
+ char *invalid_argv_missing_option[test_argc] = {"app_ut",
+ "-d",
+ "-p",
+ "--wait-for-rpc",
+ "--silence-noticelog"
+ "-R"
+ };
+
+ /* Test valid arguments. Expected result: PASS */
+ rc = spdk_app_parse_args(test_argc, valid_argv, &opts, "", NULL, unittest_parse_args, NULL);
+ CU_ASSERT_EQUAL(rc, SPDK_APP_PARSE_ARGS_SUCCESS);
+ optind = 1;
+
+ /* Test invalid short option Expected result: FAIL */
+ rc = spdk_app_parse_args(test_argc, argv_added_short_opt, &opts, "", NULL, unittest_parse_args,
+ NULL);
+ CU_ASSERT_EQUAL(rc, SPDK_APP_PARSE_ARGS_FAIL);
+ optind = 1;
+
+ /* Test valid global and local options. Expected result: PASS */
+ rc = spdk_app_parse_args(test_argc, argv_added_short_opt, &opts, "z", NULL, unittest_parse_args,
+ unittest_usage);
+ CU_ASSERT_EQUAL(rc, SPDK_APP_PARSE_ARGS_SUCCESS);
+ optind = 1;
+
+ /* Test invalid long option Expected result: FAIL */
+ rc = spdk_app_parse_args(test_argc, argv_added_long_opt, &opts, "", NULL, unittest_parse_args,
+ NULL);
+ CU_ASSERT_EQUAL(rc, SPDK_APP_PARSE_ARGS_FAIL);
+ optind = 1;
+
+ /* Test valid global and local options. Expected result: PASS */
+ my_options[0].name = "test-long-opt";
+ rc = spdk_app_parse_args(test_argc, argv_added_long_opt, &opts, "", my_options, unittest_parse_args,
+ unittest_usage);
+ CU_ASSERT_EQUAL(rc, SPDK_APP_PARSE_ARGS_SUCCESS);
+ optind = 1;
+
+ /* Test overlapping global and local options. Expected result: FAIL */
+ rc = spdk_app_parse_args(test_argc, valid_argv, &opts, SPDK_APP_GETOPT_STRING, NULL,
+ unittest_parse_args, NULL);
+ CU_ASSERT_EQUAL(rc, SPDK_APP_PARSE_ARGS_FAIL);
+ optind = 1;
+
+ /* Specify -B and -W options at the same time. Expected result: FAIL */
+ rc = spdk_app_parse_args(test_argc, invalid_argv_BW, &opts, "", NULL, unittest_parse_args, NULL);
+ SPDK_CU_ASSERT_FATAL(rc == SPDK_APP_PARSE_ARGS_FAIL);
+ optind = 1;
+
+ /* Omit necessary argument to option */
+ rc = spdk_app_parse_args(test_argc, invalid_argv_missing_option, &opts, "", NULL,
+ unittest_parse_args, NULL);
+ CU_ASSERT_EQUAL(rc, SPDK_APP_PARSE_ARGS_FAIL);
+ optind = 1;
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("app_suite", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "test_spdk_app_parse_args",
+ test_spdk_app_parse_args) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/event/subsystem.c/.gitignore b/src/spdk/test/unit/lib/event/subsystem.c/.gitignore
new file mode 100644
index 00000000..76ca0d33
--- /dev/null
+++ b/src/spdk/test/unit/lib/event/subsystem.c/.gitignore
@@ -0,0 +1 @@
+subsystem_ut
diff --git a/src/spdk/test/unit/lib/event/subsystem.c/Makefile b/src/spdk/test/unit/lib/event/subsystem.c/Makefile
new file mode 100644
index 00000000..ef76f4fe
--- /dev/null
+++ b/src/spdk/test/unit/lib/event/subsystem.c/Makefile
@@ -0,0 +1,40 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+
+TEST_FILE = subsystem_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/event/subsystem.c/subsystem_ut.c b/src/spdk/test/unit/lib/event/subsystem.c/subsystem_ut.c
new file mode 100644
index 00000000..8663e0e3
--- /dev/null
+++ b/src/spdk/test/unit/lib/event/subsystem.c/subsystem_ut.c
@@ -0,0 +1,304 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "unit/lib/json_mock.c"
+#include "event/subsystem.c"
+
+static struct spdk_subsystem g_ut_subsystems[8];
+static struct spdk_subsystem_depend g_ut_subsystem_deps[8];
+static int global_rc;
+
+void
+spdk_app_stop(int rc)
+{
+ global_rc = rc;
+}
+
+uint32_t
+spdk_env_get_current_core(void)
+{
+ return 0;
+}
+
+static void
+ut_event_fn(void *arg1, void *arg2)
+{
+}
+
+struct spdk_event *
+spdk_event_allocate(uint32_t core, spdk_event_fn fn, void *arg1, void *arg2)
+{
+ struct spdk_event *event = calloc(1, sizeof(*event));
+
+ SPDK_CU_ASSERT_FATAL(event != NULL);
+
+ event->fn = fn;
+ event->arg1 = arg1;
+ event->arg2 = arg2;
+
+ return event;
+}
+
+void spdk_event_call(struct spdk_event *event)
+{
+ if (event != NULL) {
+ if (event->fn != NULL) {
+ event->fn(event->arg1, event->arg2);
+ }
+ free(event);
+ }
+}
+
+static void
+set_up_subsystem(struct spdk_subsystem *subsystem, const char *name)
+{
+ subsystem->init = NULL;
+ subsystem->fini = NULL;
+ subsystem->config = NULL;
+ subsystem->name = name;
+}
+
+static void
+set_up_depends(struct spdk_subsystem_depend *depend, const char *subsystem_name,
+ const char *dpends_on_name)
+{
+ depend->name = subsystem_name;
+ depend->depends_on = dpends_on_name;
+}
+
+static void
+subsystem_clear(void)
+{
+ struct spdk_subsystem *subsystem, *subsystem_tmp;
+ struct spdk_subsystem_depend *subsystem_dep, *subsystem_dep_tmp;
+
+ TAILQ_FOREACH_SAFE(subsystem, &g_subsystems, tailq, subsystem_tmp) {
+ TAILQ_REMOVE(&g_subsystems, subsystem, tailq);
+ }
+
+ TAILQ_FOREACH_SAFE(subsystem_dep, &g_subsystems_deps, tailq, subsystem_dep_tmp) {
+ TAILQ_REMOVE(&g_subsystems_deps, subsystem_dep, tailq);
+ }
+}
+
+static void
+subsystem_sort_test_depends_on_single(void)
+{
+ struct spdk_subsystem *subsystem;
+ int i;
+ char subsystem_name[16];
+ struct spdk_event *app_start_event;
+
+ global_rc = -1;
+ app_start_event = spdk_event_allocate(0, ut_event_fn, NULL, NULL);
+ spdk_subsystem_init(app_start_event);
+
+ i = 4;
+ TAILQ_FOREACH(subsystem, &g_subsystems, tailq) {
+ snprintf(subsystem_name, sizeof(subsystem_name), "subsystem%d", i);
+ SPDK_CU_ASSERT_FATAL(i > 0);
+ i--;
+ CU_ASSERT(strcmp(subsystem_name, subsystem->name) == 0);
+ }
+}
+
+static void
+subsystem_sort_test_depends_on_multiple(void)
+{
+ int i;
+ struct spdk_subsystem *subsystem;
+ struct spdk_event *app_start_event;
+
+ subsystem_clear();
+ set_up_subsystem(&g_ut_subsystems[0], "iscsi");
+ set_up_subsystem(&g_ut_subsystems[1], "nvmf");
+ set_up_subsystem(&g_ut_subsystems[2], "sock");
+ set_up_subsystem(&g_ut_subsystems[3], "bdev");
+ set_up_subsystem(&g_ut_subsystems[4], "rpc");
+ set_up_subsystem(&g_ut_subsystems[5], "scsi");
+ set_up_subsystem(&g_ut_subsystems[6], "interface");
+ set_up_subsystem(&g_ut_subsystems[7], "copy");
+
+ for (i = 0; i < 8; i++) {
+ spdk_add_subsystem(&g_ut_subsystems[i]);
+ }
+
+ set_up_depends(&g_ut_subsystem_deps[0], "bdev", "copy");
+ set_up_depends(&g_ut_subsystem_deps[1], "scsi", "bdev");
+ set_up_depends(&g_ut_subsystem_deps[2], "rpc", "interface");
+ set_up_depends(&g_ut_subsystem_deps[3], "sock", "interface");
+ set_up_depends(&g_ut_subsystem_deps[4], "nvmf", "interface");
+ set_up_depends(&g_ut_subsystem_deps[5], "iscsi", "scsi");
+ set_up_depends(&g_ut_subsystem_deps[6], "iscsi", "sock");
+ set_up_depends(&g_ut_subsystem_deps[7], "iscsi", "rpc");
+
+ for (i = 0; i < 8; i++) {
+ spdk_add_subsystem_depend(&g_ut_subsystem_deps[i]);
+ }
+
+ global_rc = -1;
+ app_start_event = spdk_event_allocate(0, ut_event_fn, NULL, NULL);
+ spdk_subsystem_init(app_start_event);
+
+ subsystem = TAILQ_FIRST(&g_subsystems);
+ CU_ASSERT(strcmp(subsystem->name, "interface") == 0);
+ TAILQ_REMOVE(&g_subsystems, subsystem, tailq);
+
+ subsystem = TAILQ_FIRST(&g_subsystems);
+ CU_ASSERT(strcmp(subsystem->name, "copy") == 0);
+ TAILQ_REMOVE(&g_subsystems, subsystem, tailq);
+
+ subsystem = TAILQ_FIRST(&g_subsystems);
+ CU_ASSERT(strcmp(subsystem->name, "nvmf") == 0);
+ TAILQ_REMOVE(&g_subsystems, subsystem, tailq);
+
+ subsystem = TAILQ_FIRST(&g_subsystems);
+ CU_ASSERT(strcmp(subsystem->name, "sock") == 0);
+ TAILQ_REMOVE(&g_subsystems, subsystem, tailq);
+
+ subsystem = TAILQ_FIRST(&g_subsystems);
+ CU_ASSERT(strcmp(subsystem->name, "bdev") == 0);
+ TAILQ_REMOVE(&g_subsystems, subsystem, tailq);
+
+ subsystem = TAILQ_FIRST(&g_subsystems);
+ CU_ASSERT(strcmp(subsystem->name, "rpc") == 0);
+ TAILQ_REMOVE(&g_subsystems, subsystem, tailq);
+
+ subsystem = TAILQ_FIRST(&g_subsystems);
+ CU_ASSERT(strcmp(subsystem->name, "scsi") == 0);
+ TAILQ_REMOVE(&g_subsystems, subsystem, tailq);
+
+ subsystem = TAILQ_FIRST(&g_subsystems);
+ CU_ASSERT(strcmp(subsystem->name, "iscsi") == 0);
+ TAILQ_REMOVE(&g_subsystems, subsystem, tailq);
+}
+
+struct spdk_subsystem subsystem1 = {
+ .name = "subsystem1",
+};
+
+struct spdk_subsystem subsystem2 = {
+ .name = "subsystem2",
+};
+struct spdk_subsystem subsystem3 = {
+ .name = "subsystem3",
+};
+
+struct spdk_subsystem subsystem4 = {
+ .name = "subsystem4",
+};
+
+SPDK_SUBSYSTEM_REGISTER(subsystem1);
+SPDK_SUBSYSTEM_REGISTER(subsystem2);
+SPDK_SUBSYSTEM_REGISTER(subsystem3);
+SPDK_SUBSYSTEM_REGISTER(subsystem4);
+
+SPDK_SUBSYSTEM_DEPEND(subsystem1, subsystem2)
+SPDK_SUBSYSTEM_DEPEND(subsystem2, subsystem3)
+SPDK_SUBSYSTEM_DEPEND(subsystem3, subsystem4)
+
+
+static void
+subsystem_sort_test_missing_dependency(void)
+{
+ /*
+ * A depends on B, but B is missing
+ */
+
+ subsystem_clear();
+ set_up_subsystem(&g_ut_subsystems[0], "A");
+ spdk_add_subsystem(&g_ut_subsystems[0]);
+
+ set_up_depends(&g_ut_subsystem_deps[0], "A", "B");
+ spdk_add_subsystem_depend(&g_ut_subsystem_deps[0]);
+
+ global_rc = -1;
+ spdk_subsystem_init(NULL);
+ CU_ASSERT(global_rc != 0);
+
+ /*
+ * Dependency from C to A is defined, but C is missing
+ */
+
+ subsystem_clear();
+ set_up_subsystem(&g_ut_subsystems[0], "A");
+ spdk_add_subsystem(&g_ut_subsystems[0]);
+
+ set_up_depends(&g_ut_subsystem_deps[0], "C", "A");
+ spdk_add_subsystem_depend(&g_ut_subsystem_deps[0]);
+
+ global_rc = -1;
+ spdk_subsystem_init(NULL);
+ CU_ASSERT(global_rc != 0);
+
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("subsystem_suite", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "subsystem_sort_test_depends_on_single",
+ subsystem_sort_test_depends_on_single) == NULL
+ || CU_add_test(suite, "subsystem_sort_test_depends_on_multiple",
+ subsystem_sort_test_depends_on_multiple) == NULL
+ || CU_add_test(suite, "subsystem_sort_test_missing_dependency",
+ subsystem_sort_test_missing_dependency) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/ioat/Makefile b/src/spdk/test/unit/lib/ioat/Makefile
new file mode 100644
index 00000000..8d982710
--- /dev/null
+++ b/src/spdk/test/unit/lib/ioat/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = ioat.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/ioat/ioat.c/.gitignore b/src/spdk/test/unit/lib/ioat/ioat.c/.gitignore
new file mode 100644
index 00000000..deefbf0c
--- /dev/null
+++ b/src/spdk/test/unit/lib/ioat/ioat.c/.gitignore
@@ -0,0 +1 @@
+ioat_ut
diff --git a/src/spdk/test/unit/lib/ioat/ioat.c/Makefile b/src/spdk/test/unit/lib/ioat/ioat.c/Makefile
new file mode 100644
index 00000000..c3787c3d
--- /dev/null
+++ b/src/spdk/test/unit/lib/ioat/ioat.c/Makefile
@@ -0,0 +1,39 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+TEST_FILE = ioat_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/ioat/ioat.c/ioat_ut.c b/src/spdk/test/unit/lib/ioat/ioat.c/ioat_ut.c
new file mode 100644
index 00000000..92330e32
--- /dev/null
+++ b/src/spdk/test/unit/lib/ioat/ioat.c/ioat_ut.c
@@ -0,0 +1,153 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "ioat/ioat.c"
+
+#include "spdk_internal/mock.h"
+
+#include "common/lib/test_env.c"
+
+int
+spdk_pci_ioat_enumerate(spdk_pci_enum_cb enum_cb, void *enum_ctx)
+{
+ return -1;
+}
+
+int
+spdk_pci_device_map_bar(struct spdk_pci_device *dev, uint32_t bar,
+ void **mapped_addr, uint64_t *phys_addr, uint64_t *size)
+{
+ *mapped_addr = NULL;
+ *phys_addr = 0;
+ *size = 0;
+ return 0;
+}
+
+int
+spdk_pci_device_unmap_bar(struct spdk_pci_device *dev, uint32_t bar, void *addr)
+{
+ return 0;
+}
+
+int
+spdk_pci_device_cfg_read32(struct spdk_pci_device *dev, uint32_t *value,
+ uint32_t offset)
+{
+ *value = 0xFFFFFFFFu;
+ return 0;
+}
+
+int
+spdk_pci_device_cfg_write32(struct spdk_pci_device *dev, uint32_t value,
+ uint32_t offset)
+{
+ return 0;
+}
+
+static void ioat_state_check(void)
+{
+ /*
+ * CHANSTS's STATUS field is 3 bits (8 possible values), but only has 5 valid states:
+ * ACTIVE 0x0
+ * IDLE 0x1
+ * SUSPENDED 0x2
+ * HALTED 0x3
+ * ARMED 0x4
+ */
+
+ CU_ASSERT(is_ioat_active(0) == 1); /* ACTIVE */
+ CU_ASSERT(is_ioat_active(1) == 0); /* IDLE */
+ CU_ASSERT(is_ioat_active(2) == 0); /* SUSPENDED */
+ CU_ASSERT(is_ioat_active(3) == 0); /* HALTED */
+ CU_ASSERT(is_ioat_active(4) == 0); /* ARMED */
+ CU_ASSERT(is_ioat_active(5) == 0); /* reserved */
+ CU_ASSERT(is_ioat_active(6) == 0); /* reserved */
+ CU_ASSERT(is_ioat_active(7) == 0); /* reserved */
+
+ CU_ASSERT(is_ioat_idle(0) == 0); /* ACTIVE */
+ CU_ASSERT(is_ioat_idle(1) == 1); /* IDLE */
+ CU_ASSERT(is_ioat_idle(2) == 0); /* SUSPENDED */
+ CU_ASSERT(is_ioat_idle(3) == 0); /* HALTED */
+ CU_ASSERT(is_ioat_idle(4) == 0); /* ARMED */
+ CU_ASSERT(is_ioat_idle(5) == 0); /* reserved */
+ CU_ASSERT(is_ioat_idle(6) == 0); /* reserved */
+ CU_ASSERT(is_ioat_idle(7) == 0); /* reserved */
+
+ CU_ASSERT(is_ioat_suspended(0) == 0); /* ACTIVE */
+ CU_ASSERT(is_ioat_suspended(1) == 0); /* IDLE */
+ CU_ASSERT(is_ioat_suspended(2) == 1); /* SUSPENDED */
+ CU_ASSERT(is_ioat_suspended(3) == 0); /* HALTED */
+ CU_ASSERT(is_ioat_suspended(4) == 0); /* ARMED */
+ CU_ASSERT(is_ioat_suspended(5) == 0); /* reserved */
+ CU_ASSERT(is_ioat_suspended(6) == 0); /* reserved */
+ CU_ASSERT(is_ioat_suspended(7) == 0); /* reserved */
+
+ CU_ASSERT(is_ioat_halted(0) == 0); /* ACTIVE */
+ CU_ASSERT(is_ioat_halted(1) == 0); /* IDLE */
+ CU_ASSERT(is_ioat_halted(2) == 0); /* SUSPENDED */
+ CU_ASSERT(is_ioat_halted(3) == 1); /* HALTED */
+ CU_ASSERT(is_ioat_halted(4) == 0); /* ARMED */
+ CU_ASSERT(is_ioat_halted(5) == 0); /* reserved */
+ CU_ASSERT(is_ioat_halted(6) == 0); /* reserved */
+ CU_ASSERT(is_ioat_halted(7) == 0); /* reserved */
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("ioat", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "ioat_state_check", ioat_state_check) == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/iscsi/Makefile b/src/spdk/test/unit/lib/iscsi/Makefile
new file mode 100644
index 00000000..396c5a05
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = conn.c init_grp.c iscsi.c param.c portal_grp.c tgt_node.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/iscsi/common.c b/src/spdk/test/unit/lib/iscsi/common.c
new file mode 100644
index 00000000..9ef4f9ab
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/common.c
@@ -0,0 +1,256 @@
+#include "iscsi/task.h"
+#include "iscsi/iscsi.h"
+#include "iscsi/conn.h"
+#include "iscsi/acceptor.h"
+
+#include "spdk/env.h"
+#include "spdk/event.h"
+#include "spdk/sock.h"
+#include "spdk_cunit.h"
+
+#include "spdk_internal/log.h"
+
+#include "scsi/scsi_internal.h"
+
+SPDK_LOG_REGISTER_COMPONENT("iscsi", SPDK_LOG_ISCSI)
+
+TAILQ_HEAD(, spdk_iscsi_pdu) g_write_pdu_list;
+
+struct spdk_iscsi_task *
+spdk_iscsi_task_get(struct spdk_iscsi_conn *conn,
+ struct spdk_iscsi_task *parent,
+ spdk_scsi_task_cpl cpl_fn)
+{
+ struct spdk_iscsi_task *task;
+
+ task = calloc(1, sizeof(*task));
+
+ return task;
+}
+
+void
+spdk_scsi_task_put(struct spdk_scsi_task *task)
+{
+ free(task);
+}
+
+void
+spdk_put_pdu(struct spdk_iscsi_pdu *pdu)
+{
+ if (!pdu) {
+ return;
+ }
+
+ pdu->ref--;
+ if (pdu->ref < 0) {
+ CU_FAIL("negative ref count");
+ pdu->ref = 0;
+ }
+
+ if (pdu->ref == 0) {
+ if (pdu->data && !pdu->data_from_mempool) {
+ free(pdu->data);
+ }
+ free(pdu);
+ }
+}
+
+struct spdk_iscsi_pdu *
+spdk_get_pdu(void)
+{
+ struct spdk_iscsi_pdu *pdu;
+
+ pdu = malloc(sizeof(*pdu));
+ if (!pdu) {
+ return NULL;
+ }
+
+ memset(pdu, 0, offsetof(struct spdk_iscsi_pdu, ahs));
+ pdu->ref = 1;
+
+ return pdu;
+}
+
+void
+spdk_scsi_task_process_null_lun(struct spdk_scsi_task *task)
+{
+}
+
+void
+spdk_scsi_dev_queue_task(struct spdk_scsi_dev *dev,
+ struct spdk_scsi_task *task)
+{
+}
+
+struct spdk_scsi_port *
+spdk_scsi_dev_find_port_by_id(struct spdk_scsi_dev *dev, uint64_t id)
+{
+ return NULL;
+}
+
+void
+spdk_scsi_dev_queue_mgmt_task(struct spdk_scsi_dev *dev,
+ struct spdk_scsi_task *task,
+ enum spdk_scsi_task_func func)
+{
+}
+
+const char *
+spdk_scsi_dev_get_name(const struct spdk_scsi_dev *dev)
+{
+ if (dev != NULL) {
+ return dev->name;
+ }
+
+ return NULL;
+}
+
+void
+spdk_iscsi_acceptor_start(struct spdk_iscsi_portal *p)
+{
+}
+
+void
+spdk_iscsi_acceptor_stop(struct spdk_iscsi_portal *p)
+{
+}
+
+struct spdk_sock *
+spdk_sock_listen(const char *ip, int port)
+{
+ static int g_sock;
+
+ return (struct spdk_sock *)&g_sock;
+}
+
+int
+spdk_sock_close(struct spdk_sock **sock)
+{
+ *sock = NULL;
+
+ return 0;
+}
+
+static struct spdk_cpuset *g_app_core_mask;
+
+struct spdk_cpuset *
+spdk_app_get_core_mask(void)
+{
+ int i;
+ if (!g_app_core_mask) {
+ g_app_core_mask = spdk_cpuset_alloc();
+ for (i = 0; i < SPDK_CPUSET_SIZE; i++) {
+ spdk_cpuset_set_cpu(g_app_core_mask, i, true);
+ }
+ }
+ return g_app_core_mask;
+}
+
+int
+spdk_app_parse_core_mask(const char *mask, struct spdk_cpuset *cpumask)
+{
+ int rc;
+
+ if (mask == NULL || cpumask == NULL) {
+ return -1;
+ }
+
+ rc = spdk_cpuset_parse(cpumask, mask);
+ if (rc < 0) {
+ return -1;
+ }
+ return 0;
+}
+
+uint32_t
+spdk_env_get_current_core(void)
+{
+ return 0;
+}
+
+struct spdk_event *
+spdk_event_allocate(uint32_t core, spdk_event_fn fn, void *arg1, void *arg2)
+{
+ return NULL;
+}
+
+struct spdk_scsi_dev *
+ spdk_scsi_dev_construct(const char *name, const char **bdev_name_list,
+ int *lun_id_list, int num_luns, uint8_t protocol_id,
+ void (*hotremove_cb)(const struct spdk_scsi_lun *, void *),
+ void *hotremove_ctx)
+{
+ return NULL;
+}
+
+void
+spdk_scsi_dev_destruct(struct spdk_scsi_dev *dev)
+{
+}
+
+int
+spdk_scsi_dev_add_port(struct spdk_scsi_dev *dev, uint64_t id, const char *name)
+{
+ return 0;
+}
+
+int
+spdk_iscsi_drop_conns(struct spdk_iscsi_conn *conn, const char *conn_match,
+ int drop_all)
+{
+ return 0;
+}
+
+int
+spdk_scsi_dev_delete_port(struct spdk_scsi_dev *dev, uint64_t id)
+{
+ return 0;
+}
+
+void
+spdk_shutdown_iscsi_conns(void)
+{
+}
+
+void
+spdk_iscsi_task_cpl(struct spdk_scsi_task *scsi_task)
+{
+
+}
+
+void
+spdk_iscsi_task_mgmt_cpl(struct spdk_scsi_task *scsi_task)
+{
+
+}
+
+int
+spdk_iscsi_conn_read_data(struct spdk_iscsi_conn *conn, int bytes,
+ void *buf)
+{
+ return 0;
+}
+
+void
+spdk_iscsi_conn_write_pdu(struct spdk_iscsi_conn *conn, struct spdk_iscsi_pdu *pdu)
+{
+ TAILQ_INSERT_TAIL(&g_write_pdu_list, pdu, tailq);
+}
+
+void
+spdk_iscsi_conn_logout(struct spdk_iscsi_conn *conn)
+{
+}
+
+void
+spdk_scsi_task_set_status(struct spdk_scsi_task *task, int sc, int sk, int asc, int ascq)
+{
+}
+
+void
+spdk_scsi_task_set_data(struct spdk_scsi_task *task, void *data, uint32_t len)
+{
+ SPDK_CU_ASSERT_FATAL(task->iovs != NULL);
+ task->iovs[0].iov_base = data;
+ task->iovs[0].iov_len = len;
+}
diff --git a/src/spdk/test/unit/lib/iscsi/conn.c/.gitignore b/src/spdk/test/unit/lib/iscsi/conn.c/.gitignore
new file mode 100644
index 00000000..3bb0afd8
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/conn.c/.gitignore
@@ -0,0 +1 @@
+conn_ut
diff --git a/src/spdk/test/unit/lib/iscsi/conn.c/Makefile b/src/spdk/test/unit/lib/iscsi/conn.c/Makefile
new file mode 100644
index 00000000..96f2f5d7
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/conn.c/Makefile
@@ -0,0 +1,42 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+
+SPDK_LIB_LIST = trace
+
+TEST_FILE = conn_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/iscsi/conn.c/conn_ut.c b/src/spdk/test/unit/lib/iscsi/conn.c/conn_ut.c
new file mode 100644
index 00000000..88d23423
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/conn.c/conn_ut.c
@@ -0,0 +1,404 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "common/lib/test_env.c"
+#include "spdk_cunit.h"
+
+#include "iscsi/conn.c"
+
+SPDK_LOG_REGISTER_COMPONENT("iscsi", SPDK_LOG_ISCSI)
+
+#define DMIN32(A,B) ((uint32_t) ((uint32_t)(A) > (uint32_t)(B) ? (uint32_t)(B) : (uint32_t)(A)))
+
+struct spdk_iscsi_globals g_spdk_iscsi;
+static TAILQ_HEAD(, spdk_iscsi_task) g_ut_read_tasks = TAILQ_HEAD_INITIALIZER(g_ut_read_tasks);
+
+int
+spdk_app_get_shm_id(void)
+{
+ return 0;
+}
+
+uint32_t
+spdk_env_get_current_core(void)
+{
+ return 0;
+}
+
+uint32_t
+spdk_env_get_first_core(void)
+{
+ return 0;
+}
+
+uint32_t
+spdk_env_get_last_core(void)
+{
+ return 0;
+}
+
+uint32_t
+spdk_env_get_next_core(uint32_t prev_core)
+{
+ return 0;
+}
+
+struct spdk_event *
+spdk_event_allocate(uint32_t lcore, spdk_event_fn fn, void *arg1, void *arg2)
+{
+ return NULL;
+}
+
+void
+spdk_event_call(struct spdk_event *event)
+{
+}
+
+int
+spdk_sock_getaddr(struct spdk_sock *sock, char *saddr, int slen, uint16_t *sport,
+ char *caddr, int clen, uint16_t *cport)
+{
+ return 0;
+}
+
+int
+spdk_sock_close(struct spdk_sock **sock)
+{
+ *sock = NULL;
+ return 0;
+}
+
+ssize_t
+spdk_sock_recv(struct spdk_sock *sock, void *buf, size_t len)
+{
+ return 0;
+}
+
+ssize_t
+spdk_sock_writev(struct spdk_sock *sock, struct iovec *iov, int iovcnt)
+{
+ return 0;
+}
+
+int
+spdk_sock_set_recvlowat(struct spdk_sock *s, int nbytes)
+{
+ return 0;
+}
+
+int
+spdk_sock_set_recvbuf(struct spdk_sock *sock, int sz)
+{
+ return 0;
+}
+
+int
+spdk_sock_set_sendbuf(struct spdk_sock *sock, int sz)
+{
+ return 0;
+}
+
+int
+spdk_sock_group_add_sock(struct spdk_sock_group *group, struct spdk_sock *sock,
+ spdk_sock_cb cb_fn, void *cb_arg)
+{
+ return 0;
+}
+
+int
+spdk_sock_group_remove_sock(struct spdk_sock_group *group, struct spdk_sock *sock)
+{
+ return 0;
+}
+
+void
+spdk_scsi_task_put(struct spdk_scsi_task *task)
+{
+}
+
+struct spdk_scsi_lun *
+spdk_scsi_dev_get_lun(struct spdk_scsi_dev *dev, int lun_id)
+{
+ return NULL;
+}
+
+bool
+spdk_scsi_dev_has_pending_tasks(const struct spdk_scsi_dev *dev)
+{
+ return true;
+}
+
+int
+spdk_scsi_lun_open(struct spdk_scsi_lun *lun, spdk_scsi_remove_cb_t hotremove_cb,
+ void *hotremove_ctx, struct spdk_scsi_desc **desc)
+{
+ return 0;
+}
+
+void
+spdk_scsi_lun_close(struct spdk_scsi_desc *desc)
+{
+}
+
+int spdk_scsi_lun_allocate_io_channel(struct spdk_scsi_desc *desc)
+{
+ return 0;
+}
+
+void spdk_scsi_lun_free_io_channel(struct spdk_scsi_desc *desc)
+{
+}
+
+int
+spdk_scsi_lun_get_id(const struct spdk_scsi_lun *lun)
+{
+ return 0;
+}
+
+const char *
+spdk_scsi_port_get_name(const struct spdk_scsi_port *port)
+{
+ return NULL;
+}
+
+void
+spdk_scsi_task_copy_status(struct spdk_scsi_task *dst,
+ struct spdk_scsi_task *src)
+{
+}
+
+void
+spdk_put_pdu(struct spdk_iscsi_pdu *pdu)
+{
+}
+
+void
+spdk_iscsi_param_free(struct iscsi_param *params)
+{
+}
+
+int
+spdk_iscsi_conn_params_init(struct iscsi_param **params)
+{
+ return 0;
+}
+
+void spdk_clear_all_transfer_task(struct spdk_iscsi_conn *conn,
+ struct spdk_scsi_lun *lun)
+{
+}
+
+int
+spdk_iscsi_build_iovecs(struct spdk_iscsi_conn *conn, struct iovec *iovec,
+ struct spdk_iscsi_pdu *pdu)
+{
+ return 0;
+}
+
+bool spdk_iscsi_is_deferred_free_pdu(struct spdk_iscsi_pdu *pdu)
+{
+ return false;
+}
+
+void spdk_iscsi_task_response(struct spdk_iscsi_conn *conn,
+ struct spdk_iscsi_task *task)
+{
+}
+
+void
+spdk_iscsi_task_mgmt_response(struct spdk_iscsi_conn *conn,
+ struct spdk_iscsi_task *task)
+{
+}
+
+void spdk_iscsi_send_nopin(struct spdk_iscsi_conn *conn)
+{
+}
+
+int
+spdk_iscsi_execute(struct spdk_iscsi_conn *conn, struct spdk_iscsi_pdu *pdu)
+{
+ return 0;
+}
+
+void spdk_del_transfer_task(struct spdk_iscsi_conn *conn, uint32_t task_tag)
+{
+}
+
+int spdk_iscsi_conn_handle_queued_datain_tasks(struct spdk_iscsi_conn *conn)
+{
+ return 0;
+}
+
+int
+spdk_iscsi_read_pdu(struct spdk_iscsi_conn *conn, struct spdk_iscsi_pdu **_pdu)
+{
+ return 0;
+}
+
+void spdk_free_sess(struct spdk_iscsi_sess *sess)
+{
+}
+
+int
+spdk_iscsi_tgt_node_cleanup_luns(struct spdk_iscsi_conn *conn,
+ struct spdk_iscsi_tgt_node *target)
+{
+ return 0;
+}
+
+void
+spdk_shutdown_iscsi_conns_done(void)
+{
+}
+
+static struct spdk_iscsi_task *
+ut_conn_task_get(struct spdk_iscsi_task *parent)
+{
+ struct spdk_iscsi_task *task;
+
+ task = calloc(1, sizeof(*task));
+ SPDK_CU_ASSERT_FATAL(task != NULL);
+
+ if (parent) {
+ task->parent = parent;
+ }
+ return task;
+}
+
+static void
+ut_conn_create_read_tasks(int transfer_len)
+{
+ struct spdk_iscsi_task *task, *subtask;
+ int32_t remaining_size = 0;
+
+ task = ut_conn_task_get(NULL);
+
+ task->scsi.transfer_len = transfer_len;
+ task->scsi.offset = 0;
+ task->scsi.length = DMIN32(SPDK_BDEV_LARGE_BUF_MAX_SIZE, task->scsi.transfer_len);
+ task->scsi.status = SPDK_SCSI_STATUS_GOOD;
+
+ remaining_size = task->scsi.transfer_len - task->scsi.length;
+ task->current_datain_offset = 0;
+
+ if (remaining_size == 0) {
+ TAILQ_INSERT_TAIL(&g_ut_read_tasks, task, link);
+ return;
+ }
+
+ while (1) {
+ if (task->current_datain_offset == 0) {
+ task->current_datain_offset = task->scsi.length;
+ TAILQ_INSERT_TAIL(&g_ut_read_tasks, task, link);
+ continue;
+ }
+
+ if (task->current_datain_offset < task->scsi.transfer_len) {
+ remaining_size = task->scsi.transfer_len - task->current_datain_offset;
+
+ subtask = ut_conn_task_get(task);
+
+ subtask->scsi.offset = task->current_datain_offset;
+ subtask->scsi.length = DMIN32(SPDK_BDEV_LARGE_BUF_MAX_SIZE, remaining_size);
+ subtask->scsi.status = SPDK_SCSI_STATUS_GOOD;
+
+ task->current_datain_offset += subtask->scsi.length;
+
+ TAILQ_INSERT_TAIL(&g_ut_read_tasks, subtask, link);
+ }
+
+ if (task->current_datain_offset == task->scsi.transfer_len) {
+ break;
+ }
+ }
+}
+
+static void
+read_task_split_in_order_case(void)
+{
+ struct spdk_iscsi_task *primary, *task, *tmp;
+
+ ut_conn_create_read_tasks(SPDK_BDEV_LARGE_BUF_MAX_SIZE * 8);
+
+ TAILQ_FOREACH(task, &g_ut_read_tasks, link) {
+ primary = spdk_iscsi_task_get_primary(task);
+ process_read_task_completion(NULL, task, primary);
+ }
+
+ primary = TAILQ_FIRST(&g_ut_read_tasks);
+ SPDK_CU_ASSERT_FATAL(primary != NULL);
+
+ if (primary != NULL) {
+ CU_ASSERT(primary->bytes_completed == primary->scsi.transfer_len);
+ }
+
+ TAILQ_FOREACH_SAFE(task, &g_ut_read_tasks, link, tmp) {
+ TAILQ_REMOVE(&g_ut_read_tasks, task, link);
+ free(task);
+ }
+
+ CU_ASSERT(TAILQ_EMPTY(&g_ut_read_tasks));
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("conn_suite", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "read task split in order", read_task_split_in_order_case) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/iscsi/init_grp.c/.gitignore b/src/spdk/test/unit/lib/iscsi/init_grp.c/.gitignore
new file mode 100644
index 00000000..8fbc2b63
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/init_grp.c/.gitignore
@@ -0,0 +1 @@
+init_grp_ut
diff --git a/src/spdk/test/unit/lib/iscsi/init_grp.c/Makefile b/src/spdk/test/unit/lib/iscsi/init_grp.c/Makefile
new file mode 100644
index 00000000..9c87ef55
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/init_grp.c/Makefile
@@ -0,0 +1,41 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+
+SPDK_LIB_LIST = conf
+TEST_FILE = init_grp_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/iscsi/init_grp.c/init_grp.conf b/src/spdk/test/unit/lib/iscsi/init_grp.c/init_grp.conf
new file mode 100644
index 00000000..aaa660de
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/init_grp.c/init_grp.conf
@@ -0,0 +1,31 @@
+[IG_Valid0]
+# Success is expected.
+ InitiatorName iqn.2017-10.spdk.io:0001
+ Netmask 192.168.2.0
+
+[IG_Valid1]
+# Success is expected.
+ InitiatorName iqn.2017-10.spdk.io:0001
+ Netmask 192.168.2.0
+ Netmask 192.168.2.1
+
+[IG_Valid2]
+# Success is expected.
+ InitiatorName iqn.2017-10.spdk.io:0001
+ InitiatorName iqn.2017-10.spdk.io:0002
+ Netmask 192.168.2.0
+
+[IG_Valid3]
+# Success is expected.
+ InitiatorName iqn.2017-10.spdk.io:0001
+ InitiatorName iqn.2017-10.spdk.io:0002
+ Netmask 192.168.2.0
+ Netmask 192.168.2.1
+
+[IG_Invalid0]
+# Failure is expected.
+ InitiatorName iqn.2017-10.spdk.io:0001
+
+[IG_Invalid1]
+# Failure is expected.
+ Netmask 192.168.2.0
diff --git a/src/spdk/test/unit/lib/iscsi/init_grp.c/init_grp_ut.c b/src/spdk/test/unit/lib/iscsi/init_grp.c/init_grp_ut.c
new file mode 100644
index 00000000..5fcce81b
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/init_grp.c/init_grp_ut.c
@@ -0,0 +1,702 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+#include "CUnit/Basic.h"
+
+#include "iscsi/init_grp.c"
+#include "unit/lib/json_mock.c"
+
+SPDK_LOG_REGISTER_COMPONENT("iscsi", SPDK_LOG_ISCSI)
+
+struct spdk_iscsi_globals g_spdk_iscsi;
+
+const char *config_file;
+
+static int
+test_setup(void)
+{
+ TAILQ_INIT(&g_spdk_iscsi.ig_head);
+ return 0;
+}
+
+static void
+create_from_config_file_cases(void)
+{
+ struct spdk_conf *config;
+ struct spdk_conf_section *sp;
+ char section_name[64];
+ int section_index;
+ int rc;
+
+ config = spdk_conf_allocate();
+
+ rc = spdk_conf_read(config, config_file);
+ CU_ASSERT(rc == 0);
+
+ section_index = 0;
+ while (true) {
+ snprintf(section_name, sizeof(section_name), "IG_Valid%d", section_index);
+
+ sp = spdk_conf_find_section(config, section_name);
+ if (sp == NULL) {
+ break;
+ }
+
+ rc = spdk_iscsi_parse_init_grp(sp);
+ CU_ASSERT(rc == 0);
+
+ spdk_iscsi_init_grps_destroy();
+
+ section_index++;
+ }
+
+ section_index = 0;
+ while (true) {
+ snprintf(section_name, sizeof(section_name), "IG_Invalid%d", section_index);
+
+ sp = spdk_conf_find_section(config, section_name);
+ if (sp == NULL) {
+ break;
+ }
+
+ rc = spdk_iscsi_parse_init_grp(sp);
+ CU_ASSERT(rc != 0);
+
+ spdk_iscsi_init_grps_destroy();
+
+ section_index++;
+ }
+
+ spdk_conf_free(config);
+}
+
+
+static void
+create_initiator_group_success_case(void)
+{
+ struct spdk_iscsi_init_grp *ig;
+
+ ig = spdk_iscsi_init_grp_create(1);
+ CU_ASSERT(ig != NULL);
+
+ spdk_iscsi_init_grp_destroy(ig);
+}
+
+static void
+find_initiator_group_success_case(void)
+{
+ struct spdk_iscsi_init_grp *ig, *tmp;
+ int rc;
+
+ ig = spdk_iscsi_init_grp_create(1);
+ CU_ASSERT(ig != NULL);
+
+ rc = spdk_iscsi_init_grp_register(ig);
+ CU_ASSERT(rc == 0);
+
+ ig = spdk_iscsi_init_grp_find_by_tag(1);
+ CU_ASSERT(ig != NULL);
+
+ tmp = spdk_iscsi_init_grp_unregister(1);
+ CU_ASSERT(ig == tmp);
+ spdk_iscsi_init_grp_destroy(ig);
+
+ ig = spdk_iscsi_init_grp_find_by_tag(1);
+ CU_ASSERT(ig == NULL);
+}
+
+static void
+register_initiator_group_twice_case(void)
+{
+ struct spdk_iscsi_init_grp *ig, *tmp;
+ int rc;
+
+ ig = spdk_iscsi_init_grp_create(1);
+ CU_ASSERT(ig != NULL);
+
+ rc = spdk_iscsi_init_grp_register(ig);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_iscsi_init_grp_register(ig);
+ CU_ASSERT(rc != 0);
+
+ ig = spdk_iscsi_init_grp_find_by_tag(1);
+ CU_ASSERT(ig != NULL);
+
+ tmp = spdk_iscsi_init_grp_unregister(1);
+ CU_ASSERT(tmp == ig);
+ spdk_iscsi_init_grp_destroy(ig);
+
+ ig = spdk_iscsi_init_grp_find_by_tag(1);
+ CU_ASSERT(ig == NULL);
+}
+
+static void
+add_initiator_name_success_case(void)
+{
+
+ int rc;
+ struct spdk_iscsi_init_grp *ig;
+ struct spdk_iscsi_initiator_name *iname;
+ char *name1 = "iqn.2017-10.spdk.io:0001";
+ char *name2 = "iqn.2017-10.spdk.io:0002";
+
+ ig = spdk_iscsi_init_grp_create(1);
+ CU_ASSERT(ig != NULL);
+
+ /* add two different names to the empty name list */
+ rc = spdk_iscsi_init_grp_add_initiator(ig, name1);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_iscsi_init_grp_add_initiator(ig, name2);
+ CU_ASSERT(rc == 0);
+
+ /* check if two names are added correctly. */
+ iname = spdk_iscsi_init_grp_find_initiator(ig, name1);
+ CU_ASSERT(iname != NULL);
+
+ iname = spdk_iscsi_init_grp_find_initiator(ig, name2);
+ CU_ASSERT(iname != NULL);
+
+ /* restore the initial state */
+ rc = spdk_iscsi_init_grp_delete_initiator(ig, name1);
+ CU_ASSERT(rc == 0);
+
+ iname = spdk_iscsi_init_grp_find_initiator(ig, name1);
+ CU_ASSERT(iname == NULL);
+
+ rc = spdk_iscsi_init_grp_delete_initiator(ig, name2);
+ CU_ASSERT(rc == 0);
+
+ iname = spdk_iscsi_init_grp_find_initiator(ig, name2);
+ CU_ASSERT(iname == NULL);
+
+ spdk_iscsi_init_grp_destroy(ig);
+}
+
+static void
+add_initiator_name_fail_case(void)
+{
+ int rc;
+ struct spdk_iscsi_init_grp *ig;
+ struct spdk_iscsi_initiator_name *iname;
+ char *name1 = "iqn.2017-10.spdk.io:0001";
+
+ ig = spdk_iscsi_init_grp_create(1);
+ CU_ASSERT(ig != NULL);
+
+ /* add an name to the full name list */
+ ig->ninitiators = MAX_INITIATOR;
+
+ rc = spdk_iscsi_init_grp_add_initiator(ig, name1);
+ CU_ASSERT(rc != 0);
+
+ ig->ninitiators = 0;
+
+ /* add the same name to the name list twice */
+ rc = spdk_iscsi_init_grp_add_initiator(ig, name1);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_iscsi_init_grp_add_initiator(ig, name1);
+ CU_ASSERT(rc != 0);
+
+ /* restore the initial state */
+ rc = spdk_iscsi_init_grp_delete_initiator(ig, name1);
+ CU_ASSERT(rc == 0);
+
+ iname = spdk_iscsi_init_grp_find_initiator(ig, name1);
+ CU_ASSERT(iname == NULL);
+
+ spdk_iscsi_init_grp_destroy(ig);
+}
+
+static void
+delete_all_initiator_names_success_case(void)
+{
+ int rc;
+ struct spdk_iscsi_init_grp *ig;
+ struct spdk_iscsi_initiator_name *iname;
+ char *name1 = "iqn.2017-10.spdk.io:0001";
+ char *name2 = "iqn.2017-10.spdk.io:0002";
+
+ ig = spdk_iscsi_init_grp_create(1);
+ CU_ASSERT(ig != NULL);
+
+ /* add two different names to the empty name list */
+ rc = spdk_iscsi_init_grp_add_initiator(ig, name1);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_iscsi_init_grp_add_initiator(ig, name2);
+ CU_ASSERT(rc == 0);
+
+ /* delete all initiator names */
+ spdk_iscsi_init_grp_delete_all_initiators(ig);
+
+ /* check if two names are deleted correctly. */
+ iname = spdk_iscsi_init_grp_find_initiator(ig, name1);
+ CU_ASSERT(iname == NULL);
+
+ iname = spdk_iscsi_init_grp_find_initiator(ig, name2);
+ CU_ASSERT(iname == NULL);
+
+ /* restore the initial state */
+ spdk_iscsi_init_grp_destroy(ig);
+}
+
+static void
+add_netmask_success_case(void)
+{
+ int rc;
+ struct spdk_iscsi_init_grp *ig;
+ struct spdk_iscsi_initiator_netmask *imask;
+ char *netmask1 = "192.168.2.0";
+ char *netmask2 = "192.168.2.1";
+
+ ig = spdk_iscsi_init_grp_create(1);
+ CU_ASSERT(ig != NULL);
+
+ /* add two different netmasks to the empty netmask list */
+ rc = spdk_iscsi_init_grp_add_netmask(ig, netmask1);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_iscsi_init_grp_add_netmask(ig, netmask2);
+ CU_ASSERT(rc == 0);
+
+ /* check if two netmasks are added correctly. */
+ imask = spdk_iscsi_init_grp_find_netmask(ig, netmask1);
+ CU_ASSERT(imask != NULL);
+
+ imask = spdk_iscsi_init_grp_find_netmask(ig, netmask2);
+ CU_ASSERT(imask != NULL);
+
+ /* restore the initial state */
+ rc = spdk_iscsi_init_grp_delete_netmask(ig, netmask1);
+ CU_ASSERT(rc == 0);
+
+ imask = spdk_iscsi_init_grp_find_netmask(ig, netmask1);
+ CU_ASSERT(imask == NULL);
+
+ rc = spdk_iscsi_init_grp_delete_netmask(ig, netmask2);
+ CU_ASSERT(rc == 0);
+
+ imask = spdk_iscsi_init_grp_find_netmask(ig, netmask2);
+ CU_ASSERT(imask == NULL);
+
+ spdk_iscsi_init_grp_destroy(ig);
+}
+
+static void
+add_netmask_fail_case(void)
+{
+ int rc;
+ struct spdk_iscsi_init_grp *ig;
+ struct spdk_iscsi_initiator_netmask *imask;
+ char *netmask1 = "192.168.2.0";
+
+ ig = spdk_iscsi_init_grp_create(1);
+ CU_ASSERT(ig != NULL);
+
+ /* add an netmask to the full netmask list */
+ ig->nnetmasks = MAX_NETMASK;
+
+ rc = spdk_iscsi_init_grp_add_netmask(ig, netmask1);
+ CU_ASSERT(rc != 0);
+
+ ig->nnetmasks = 0;
+
+ /* add the same netmask to the netmask list twice */
+ rc = spdk_iscsi_init_grp_add_netmask(ig, netmask1);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_iscsi_init_grp_add_netmask(ig, netmask1);
+ CU_ASSERT(rc != 0);
+
+ /* restore the initial state */
+ rc = spdk_iscsi_init_grp_delete_netmask(ig, netmask1);
+ CU_ASSERT(rc == 0);
+
+ imask = spdk_iscsi_init_grp_find_netmask(ig, netmask1);
+ CU_ASSERT(imask == NULL);
+
+ spdk_iscsi_init_grp_destroy(ig);
+}
+
+static void
+delete_all_netmasks_success_case(void)
+{
+ int rc;
+ struct spdk_iscsi_init_grp *ig;
+ struct spdk_iscsi_initiator_netmask *imask;
+ char *netmask1 = "192.168.2.0";
+ char *netmask2 = "192.168.2.1";
+
+ ig = spdk_iscsi_init_grp_create(1);
+ CU_ASSERT(ig != NULL);
+
+ /* add two different netmasks to the empty netmask list */
+ rc = spdk_iscsi_init_grp_add_netmask(ig, netmask1);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_iscsi_init_grp_add_netmask(ig, netmask2);
+ CU_ASSERT(rc == 0);
+
+ /* delete all netmasks */
+ spdk_iscsi_init_grp_delete_all_netmasks(ig);
+
+ /* check if two netmasks are deleted correctly. */
+ imask = spdk_iscsi_init_grp_find_netmask(ig, netmask1);
+ CU_ASSERT(imask == NULL);
+
+ imask = spdk_iscsi_init_grp_find_netmask(ig, netmask2);
+ CU_ASSERT(imask == NULL);
+
+ /* restore the initial state */
+ spdk_iscsi_init_grp_destroy(ig);
+}
+
+static void
+initiator_name_overwrite_all_to_any_case(void)
+{
+ int rc;
+ struct spdk_iscsi_init_grp *ig;
+ struct spdk_iscsi_initiator_name *iname;
+ char *all = "ALL";
+ char *any = "ANY";
+ char *all_not = "!ALL";
+ char *any_not = "!ANY";
+
+ ig = spdk_iscsi_init_grp_create(1);
+ CU_ASSERT(ig != NULL);
+
+ rc = spdk_iscsi_init_grp_add_initiator(ig, all);
+ CU_ASSERT(rc == 0);
+
+ iname = spdk_iscsi_init_grp_find_initiator(ig, all);
+ CU_ASSERT(iname == NULL);
+
+ iname = spdk_iscsi_init_grp_find_initiator(ig, any);
+ CU_ASSERT(iname != NULL);
+
+ rc = spdk_iscsi_init_grp_delete_initiator(ig, any);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_iscsi_init_grp_add_initiator(ig, all_not);
+ CU_ASSERT(rc == 0);
+
+ iname = spdk_iscsi_init_grp_find_initiator(ig, all_not);
+ CU_ASSERT(iname == NULL);
+
+ iname = spdk_iscsi_init_grp_find_initiator(ig, any_not);
+ CU_ASSERT(iname != NULL);
+
+ rc = spdk_iscsi_init_grp_delete_initiator(ig, any_not);
+ CU_ASSERT(rc == 0);
+
+ spdk_iscsi_init_grp_destroy(ig);
+}
+
+static void
+netmask_overwrite_all_to_any_case(void)
+{
+ int rc;
+ struct spdk_iscsi_init_grp *ig;
+ struct spdk_iscsi_initiator_netmask *imask;
+ char *all = "ALL";
+ char *any = "ANY";
+
+ ig = spdk_iscsi_init_grp_create(1);
+ CU_ASSERT(ig != NULL);
+
+ rc = spdk_iscsi_init_grp_add_netmask(ig, all);
+ CU_ASSERT(rc == 0);
+
+ imask = spdk_iscsi_init_grp_find_netmask(ig, all);
+ CU_ASSERT(imask == NULL);
+
+ imask = spdk_iscsi_init_grp_find_netmask(ig, any);
+ CU_ASSERT(imask != NULL);
+
+ rc = spdk_iscsi_init_grp_delete_netmask(ig, any);
+ CU_ASSERT(rc == 0);
+
+ spdk_iscsi_init_grp_destroy(ig);
+}
+
+static void
+add_delete_initiator_names_case(void)
+{
+ int rc, i;
+ struct spdk_iscsi_init_grp *ig;
+ struct spdk_iscsi_initiator_name *iname;
+ char *names[3] = {"iqn.2018-02.spdk.io:0001", "iqn.2018-02.spdk.io:0002", "iqn.2018-02.spdk.io:0003"};
+
+ ig = spdk_iscsi_init_grp_create(1);
+ SPDK_CU_ASSERT_FATAL(ig != NULL);
+
+ rc = spdk_iscsi_init_grp_add_initiators(ig, 3, names);
+ CU_ASSERT(rc == 0);
+
+ for (i = 0; i < 3; i++) {
+ iname = spdk_iscsi_init_grp_find_initiator(ig, names[i]);
+ CU_ASSERT(iname != NULL);
+ }
+
+ rc = spdk_iscsi_init_grp_delete_initiators(ig, 3, names);
+ CU_ASSERT(rc == 0);
+
+ if (ig != NULL) {
+ CU_ASSERT(TAILQ_EMPTY(&ig->initiator_head));
+ }
+
+ spdk_iscsi_init_grp_destroy(ig);
+}
+
+static void
+add_duplicated_initiator_names_case(void)
+{
+ int rc;
+ struct spdk_iscsi_init_grp *ig;
+ char *names[3] = {"iqn.2018-02.spdk.io:0001", "iqn.2018-02.spdk.io:0002", "iqn.2018-02.spdk.io:0001"};
+
+ ig = spdk_iscsi_init_grp_create(1);
+ SPDK_CU_ASSERT_FATAL(ig != NULL);
+
+ rc = spdk_iscsi_init_grp_add_initiators(ig, 3, names);
+ CU_ASSERT(rc != 0);
+
+ if (ig != NULL) {
+ CU_ASSERT(TAILQ_EMPTY(&ig->initiator_head));
+ }
+
+ spdk_iscsi_init_grp_destroy(ig);
+}
+
+static void
+delete_nonexisting_initiator_names_case(void)
+{
+ int rc, i;
+ struct spdk_iscsi_init_grp *ig;
+ struct spdk_iscsi_initiator_name *iname;
+ char *names1[3] = {"iqn.2018-02.spdk.io:0001", "iqn.2018-02.spdk.io:0002", "iqn.2018-02.spdk.io:0003"};
+ char *names2[3] = {"iqn.2018-02.spdk.io:0001", "iqn.2018-02.spdk.io:0002", "iqn.2018-02.spdk.io:0004"};
+
+ ig = spdk_iscsi_init_grp_create(1);
+ SPDK_CU_ASSERT_FATAL(ig != NULL);
+
+ rc = spdk_iscsi_init_grp_add_initiators(ig, 3, names1);
+ CU_ASSERT(rc == 0);
+
+ for (i = 0; i < 3; i++) {
+ iname = spdk_iscsi_init_grp_find_initiator(ig, names1[i]);
+ CU_ASSERT(iname != NULL);
+ }
+
+ rc = spdk_iscsi_init_grp_delete_initiators(ig, 3, names2);
+ CU_ASSERT(rc != 0);
+
+ for (i = 0; i < 3; i++) {
+ iname = spdk_iscsi_init_grp_find_initiator(ig, names1[i]);
+ CU_ASSERT(iname != NULL);
+ }
+
+ rc = spdk_iscsi_init_grp_delete_initiators(ig, 3, names1);
+ CU_ASSERT(rc == 0);
+
+ if (ig != NULL) {
+ CU_ASSERT(TAILQ_EMPTY(&ig->initiator_head));
+ }
+
+ spdk_iscsi_init_grp_destroy(ig);
+}
+
+static void
+add_delete_netmasks_case(void)
+{
+ int rc, i;
+ struct spdk_iscsi_init_grp *ig;
+ struct spdk_iscsi_initiator_netmask *netmask;
+ char *netmasks[3] = {"192.168.2.0", "192.168.2.1", "192.168.2.2"};
+
+ ig = spdk_iscsi_init_grp_create(1);
+ SPDK_CU_ASSERT_FATAL(ig != NULL);
+
+ rc = spdk_iscsi_init_grp_add_netmasks(ig, 3, netmasks);
+ CU_ASSERT(rc == 0);
+
+ for (i = 0; i < 3; i++) {
+ netmask = spdk_iscsi_init_grp_find_netmask(ig, netmasks[i]);
+ CU_ASSERT(netmask != NULL);
+ }
+
+ rc = spdk_iscsi_init_grp_delete_netmasks(ig, 3, netmasks);
+ CU_ASSERT(rc == 0);
+
+ if (ig != NULL) {
+ CU_ASSERT(TAILQ_EMPTY(&ig->netmask_head));
+ }
+
+ spdk_iscsi_init_grp_destroy(ig);
+}
+
+static void
+add_duplicated_netmasks_case(void)
+{
+ int rc;
+ struct spdk_iscsi_init_grp *ig;
+ char *netmasks[3] = {"192.168.2.0", "192.168.2.1", "192.168.2.0"};
+
+ ig = spdk_iscsi_init_grp_create(1);
+ SPDK_CU_ASSERT_FATAL(ig != NULL);
+
+ rc = spdk_iscsi_init_grp_add_netmasks(ig, 3, netmasks);
+ CU_ASSERT(rc != 0);
+
+ if (ig != NULL) {
+ CU_ASSERT(TAILQ_EMPTY(&ig->netmask_head));
+ }
+
+ spdk_iscsi_init_grp_destroy(ig);
+}
+
+static void
+delete_nonexisting_netmasks_case(void)
+{
+ int rc, i;
+ struct spdk_iscsi_init_grp *ig;
+ struct spdk_iscsi_initiator_netmask *netmask;
+ char *netmasks1[3] = {"192.168.2.0", "192.168.2.1", "192.168.2.2"};
+ char *netmasks2[3] = {"192.168.2.0", "192.168.2.1", "192.168.2.3"};
+
+ ig = spdk_iscsi_init_grp_create(1);
+ SPDK_CU_ASSERT_FATAL(ig != NULL);
+
+ rc = spdk_iscsi_init_grp_add_netmasks(ig, 3, netmasks1);
+ CU_ASSERT(rc == 0);
+
+ for (i = 0; i < 3; i++) {
+ netmask = spdk_iscsi_init_grp_find_netmask(ig, netmasks1[i]);
+ CU_ASSERT(netmask != NULL);
+ }
+
+ rc = spdk_iscsi_init_grp_delete_netmasks(ig, 3, netmasks2);
+ CU_ASSERT(rc != 0);
+
+ for (i = 0; i < 3; i++) {
+ netmask = spdk_iscsi_init_grp_find_netmask(ig, netmasks1[i]);
+ CU_ASSERT(netmask != NULL);
+ }
+
+ rc = spdk_iscsi_init_grp_delete_netmasks(ig, 3, netmasks1);
+ CU_ASSERT(rc == 0);
+
+ if (ig != NULL) {
+ CU_ASSERT(TAILQ_EMPTY(&ig->netmask_head));
+ }
+
+ spdk_iscsi_init_grp_destroy(ig);
+}
+
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (argc < 2) {
+ fprintf(stderr, "usage: %s <config file>\n", argv[0]);
+ exit(1);
+ }
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ config_file = argv[1];
+
+ suite = CU_add_suite("init_grp_suite", test_setup, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "create from config file cases",
+ create_from_config_file_cases) == NULL
+ || CU_add_test(suite, "create initiator group success case",
+ create_initiator_group_success_case) == NULL
+ || CU_add_test(suite, "find initiator group success case",
+ find_initiator_group_success_case) == NULL
+ || CU_add_test(suite, "register initiator group twice case",
+ register_initiator_group_twice_case) == NULL
+ || CU_add_test(suite, "add initiator name success case",
+ add_initiator_name_success_case) == NULL
+ || CU_add_test(suite, "add initiator name fail case",
+ add_initiator_name_fail_case) == NULL
+ || CU_add_test(suite, "delete all initiator names success case",
+ delete_all_initiator_names_success_case) == NULL
+ || CU_add_test(suite, "add initiator netmask success case",
+ add_netmask_success_case) == NULL
+ || CU_add_test(suite, "add initiator netmask fail case",
+ add_netmask_fail_case) == NULL
+ || CU_add_test(suite, "delete all initiator netmasks success case",
+ delete_all_netmasks_success_case) == NULL
+ || CU_add_test(suite, "overwrite all to any for name case",
+ initiator_name_overwrite_all_to_any_case) == NULL
+ || CU_add_test(suite, "overwrite all to any for netmask case",
+ netmask_overwrite_all_to_any_case) == NULL
+ || CU_add_test(suite, "add/delete initiator names case",
+ add_delete_initiator_names_case) == NULL
+ || CU_add_test(suite, "add duplicated initiator names case",
+ add_duplicated_initiator_names_case) == NULL
+ || CU_add_test(suite, "delete nonexisting initiator names case",
+ delete_nonexisting_initiator_names_case) == NULL
+ || CU_add_test(suite, "add/delete netmasks case",
+ add_delete_netmasks_case) == NULL
+ || CU_add_test(suite, "add duplicated netmasks case",
+ add_duplicated_netmasks_case) == NULL
+ || CU_add_test(suite, "delete nonexisting netmasks case",
+ delete_nonexisting_netmasks_case) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/iscsi/iscsi.c/.gitignore b/src/spdk/test/unit/lib/iscsi/iscsi.c/.gitignore
new file mode 100644
index 00000000..4d41887c
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/iscsi.c/.gitignore
@@ -0,0 +1 @@
+iscsi_ut
diff --git a/src/spdk/test/unit/lib/iscsi/iscsi.c/Makefile b/src/spdk/test/unit/lib/iscsi/iscsi.c/Makefile
new file mode 100644
index 00000000..bc9a9d8b
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/iscsi.c/Makefile
@@ -0,0 +1,48 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+
+SPDK_LIB_LIST = trace conf util
+
+SCSI_OBJS = port
+ISCSI_OBJS = md5 param
+LIBS += $(SCSI_OBJS:%=$(SPDK_ROOT_DIR)/lib/scsi/%.o)
+LIBS += $(ISCSI_OBJS:%=$(SPDK_ROOT_DIR)/lib/iscsi/%.o)
+LIBS += -lcunit $(ENV_LINKER_ARGS)
+
+TEST_FILE = iscsi_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/iscsi/iscsi.c/iscsi_ut.c b/src/spdk/test/unit/lib/iscsi/iscsi.c/iscsi_ut.c
new file mode 100644
index 00000000..4038a1e4
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/iscsi.c/iscsi_ut.c
@@ -0,0 +1,972 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk/endian.h"
+#include "spdk/scsi.h"
+#include "spdk_cunit.h"
+
+#include "CUnit/Basic.h"
+
+#include "iscsi/iscsi.c"
+
+#include "../common.c"
+#include "iscsi/acceptor.h"
+#include "iscsi/portal_grp.h"
+#include "scsi/scsi_internal.h"
+
+#define UT_TARGET_NAME1 "iqn.2017-11.spdk.io:t0001"
+#define UT_TARGET_NAME2 "iqn.2017-11.spdk.io:t0002"
+#define UT_INITIATOR_NAME1 "iqn.2017-11.spdk.io:i0001"
+#define UT_INITIATOR_NAME2 "iqn.2017-11.spdk.io:i0002"
+
+struct spdk_iscsi_tgt_node *
+spdk_iscsi_find_tgt_node(const char *target_name)
+{
+ if (strcasecmp(target_name, UT_TARGET_NAME1) == 0) {
+ return (struct spdk_iscsi_tgt_node *)1;
+ } else {
+ return NULL;
+ }
+}
+
+bool
+spdk_iscsi_tgt_node_access(struct spdk_iscsi_conn *conn,
+ struct spdk_iscsi_tgt_node *target,
+ const char *iqn, const char *addr)
+{
+ if (strcasecmp(conn->initiator_name, UT_INITIATOR_NAME1) == 0) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
+int
+spdk_iscsi_send_tgts(struct spdk_iscsi_conn *conn, const char *iiqn,
+ const char *iaddr,
+ const char *tiqn, uint8_t *data, int alloc_len, int data_len)
+{
+ return 0;
+}
+
+void
+spdk_iscsi_portal_grp_close_all(void)
+{
+}
+
+void
+spdk_iscsi_conn_migration(struct spdk_iscsi_conn *conn)
+{
+}
+
+void
+spdk_iscsi_conn_free_pdu(struct spdk_iscsi_conn *conn, struct spdk_iscsi_pdu *pdu)
+{
+}
+
+int
+spdk_iscsi_chap_get_authinfo(struct iscsi_chap_auth *auth, const char *authuser,
+ int ag_tag)
+{
+ return 0;
+}
+
+int
+spdk_scsi_lun_get_id(const struct spdk_scsi_lun *lun)
+{
+ return lun->id;
+}
+
+bool
+spdk_scsi_lun_is_removing(const struct spdk_scsi_lun *lun)
+{
+ return true;
+}
+
+struct spdk_scsi_lun *
+spdk_scsi_dev_get_lun(struct spdk_scsi_dev *dev, int lun_id)
+{
+ if (lun_id < 0 || lun_id >= SPDK_SCSI_DEV_MAX_LUN) {
+ return NULL;
+ }
+
+ return dev->lun[lun_id];
+}
+
+static void
+op_login_check_target_test(void)
+{
+ struct spdk_iscsi_conn conn;
+ struct spdk_iscsi_pdu rsp_pdu;
+ struct spdk_iscsi_tgt_node *target;
+ int rc;
+
+ /* expect success */
+ snprintf(conn.initiator_name, sizeof(conn.initiator_name),
+ "%s", UT_INITIATOR_NAME1);
+
+ rc = spdk_iscsi_op_login_check_target(&conn, &rsp_pdu,
+ UT_TARGET_NAME1, &target);
+ CU_ASSERT(rc == 0);
+
+ /* expect failure */
+ snprintf(conn.initiator_name, sizeof(conn.initiator_name),
+ "%s", UT_INITIATOR_NAME1);
+
+ rc = spdk_iscsi_op_login_check_target(&conn, &rsp_pdu,
+ UT_TARGET_NAME2, &target);
+ CU_ASSERT(rc != 0);
+
+ /* expect failure */
+ snprintf(conn.initiator_name, sizeof(conn.initiator_name),
+ "%s", UT_INITIATOR_NAME2);
+
+ rc = spdk_iscsi_op_login_check_target(&conn, &rsp_pdu,
+ UT_TARGET_NAME1, &target);
+ CU_ASSERT(rc != 0);
+}
+
+static void
+maxburstlength_test(void)
+{
+ struct spdk_iscsi_sess sess;
+ struct spdk_iscsi_conn conn;
+ struct spdk_scsi_dev dev;
+ struct spdk_scsi_lun lun;
+ struct spdk_iscsi_pdu *req_pdu, *data_out_pdu, *r2t_pdu;
+ struct iscsi_bhs_scsi_req *req;
+ struct iscsi_bhs_r2t *r2t;
+ struct iscsi_bhs_data_out *data_out;
+ struct spdk_iscsi_pdu *response_pdu;
+ int rc;
+
+ memset(&sess, 0, sizeof(sess));
+ memset(&conn, 0, sizeof(conn));
+ memset(&dev, 0, sizeof(dev));
+ memset(&lun, 0, sizeof(lun));
+
+ req_pdu = spdk_get_pdu();
+ data_out_pdu = spdk_get_pdu();
+
+ sess.ExpCmdSN = 0;
+ sess.MaxCmdSN = 64;
+ sess.session_type = SESSION_TYPE_NORMAL;
+ sess.MaxBurstLength = 1024;
+
+ lun.id = 0;
+
+ dev.lun[0] = &lun;
+
+ conn.full_feature = 1;
+ conn.sess = &sess;
+ conn.dev = &dev;
+ conn.state = ISCSI_CONN_STATE_RUNNING;
+ TAILQ_INIT(&conn.write_pdu_list);
+ TAILQ_INIT(&conn.active_r2t_tasks);
+
+ TAILQ_INIT(&g_write_pdu_list);
+
+ req_pdu->bhs.opcode = ISCSI_OP_SCSI;
+ req_pdu->data_segment_len = 0;
+
+ req = (struct iscsi_bhs_scsi_req *)&req_pdu->bhs;
+
+ to_be32(&req->cmd_sn, 0);
+ to_be32(&req->expected_data_xfer_len, 1028);
+ to_be32(&req->itt, 0x1234);
+ req->write_bit = 1;
+ req->final_bit = 1;
+
+ rc = spdk_iscsi_execute(&conn, req_pdu);
+ CU_ASSERT(rc == 0);
+
+ response_pdu = TAILQ_FIRST(&g_write_pdu_list);
+ SPDK_CU_ASSERT_FATAL(response_pdu != NULL);
+
+ /*
+ * Confirm that a correct R2T reply was sent in response to the
+ * SCSI request.
+ */
+ TAILQ_REMOVE(&g_write_pdu_list, response_pdu, tailq);
+ CU_ASSERT(response_pdu->bhs.opcode == ISCSI_OP_R2T);
+ r2t = (struct iscsi_bhs_r2t *)&response_pdu->bhs;
+ CU_ASSERT(from_be32(&r2t->desired_xfer_len) == 1024);
+ CU_ASSERT(from_be32(&r2t->buffer_offset) == 0);
+ CU_ASSERT(from_be32(&r2t->itt) == 0x1234);
+
+ data_out_pdu->bhs.opcode = ISCSI_OP_SCSI_DATAOUT;
+ data_out_pdu->bhs.flags = ISCSI_FLAG_FINAL;
+ data_out_pdu->data_segment_len = 1028;
+ data_out = (struct iscsi_bhs_data_out *)&data_out_pdu->bhs;
+ data_out->itt = r2t->itt;
+ data_out->ttt = r2t->ttt;
+ DSET24(data_out->data_segment_len, 1028);
+
+ rc = spdk_iscsi_execute(&conn, data_out_pdu);
+ CU_ASSERT(rc == SPDK_ISCSI_CONNECTION_FATAL);
+
+ SPDK_CU_ASSERT_FATAL(response_pdu->task != NULL);
+ spdk_iscsi_task_disassociate_pdu(response_pdu->task);
+ spdk_iscsi_task_put(response_pdu->task);
+ spdk_put_pdu(response_pdu);
+
+ r2t_pdu = TAILQ_FIRST(&g_write_pdu_list);
+ CU_ASSERT(r2t_pdu != NULL);
+ TAILQ_REMOVE(&g_write_pdu_list, r2t_pdu, tailq);
+ spdk_put_pdu(r2t_pdu);
+
+ spdk_put_pdu(data_out_pdu);
+ spdk_put_pdu(req_pdu);
+}
+
+static void
+underflow_for_read_transfer_test(void)
+{
+ struct spdk_iscsi_sess sess;
+ struct spdk_iscsi_conn conn;
+ struct spdk_iscsi_task task;
+ struct spdk_iscsi_pdu *pdu;
+ struct iscsi_bhs_scsi_req *scsi_req;
+ struct iscsi_bhs_data_in *datah;
+ uint32_t residual_count = 0;
+
+ TAILQ_INIT(&g_write_pdu_list);
+
+ memset(&sess, 0, sizeof(sess));
+ memset(&conn, 0, sizeof(conn));
+ memset(&task, 0, sizeof(task));
+
+ sess.MaxBurstLength = SPDK_ISCSI_MAX_BURST_LENGTH;
+
+ conn.sess = &sess;
+ conn.MaxRecvDataSegmentLength = 8192;
+
+ pdu = spdk_get_pdu();
+ SPDK_CU_ASSERT_FATAL(pdu != NULL);
+
+ scsi_req = (struct iscsi_bhs_scsi_req *)&pdu->bhs;
+ scsi_req->read_bit = 1;
+
+ spdk_iscsi_task_set_pdu(&task, pdu);
+ task.parent = NULL;
+
+ task.scsi.iovs = &task.scsi.iov;
+ task.scsi.iovcnt = 1;
+ task.scsi.length = 512;
+ task.scsi.transfer_len = 512;
+ task.bytes_completed = 512;
+ task.scsi.data_transferred = 256;
+ task.scsi.status = SPDK_SCSI_STATUS_GOOD;
+
+ spdk_iscsi_task_response(&conn, &task);
+ spdk_put_pdu(pdu);
+
+ /*
+ * In this case, a SCSI Data-In PDU should contain the Status
+ * for the data transfer.
+ */
+ to_be32(&residual_count, 256);
+
+ pdu = TAILQ_FIRST(&g_write_pdu_list);
+ SPDK_CU_ASSERT_FATAL(pdu != NULL);
+
+ CU_ASSERT(pdu->bhs.opcode == ISCSI_OP_SCSI_DATAIN);
+
+ datah = (struct iscsi_bhs_data_in *)&pdu->bhs;
+
+ CU_ASSERT(datah->flags == (ISCSI_DATAIN_UNDERFLOW | ISCSI_FLAG_FINAL | ISCSI_DATAIN_STATUS));
+ CU_ASSERT(datah->res_cnt == residual_count);
+
+ TAILQ_REMOVE(&g_write_pdu_list, pdu, tailq);
+ spdk_put_pdu(pdu);
+
+ CU_ASSERT(TAILQ_EMPTY(&g_write_pdu_list));
+}
+
+static void
+underflow_for_zero_read_transfer_test(void)
+{
+ struct spdk_iscsi_sess sess;
+ struct spdk_iscsi_conn conn;
+ struct spdk_iscsi_task task;
+ struct spdk_iscsi_pdu *pdu;
+ struct iscsi_bhs_scsi_req *scsi_req;
+ struct iscsi_bhs_scsi_resp *resph;
+ uint32_t residual_count = 0, data_segment_len;
+
+ TAILQ_INIT(&g_write_pdu_list);
+
+ memset(&sess, 0, sizeof(sess));
+ memset(&conn, 0, sizeof(conn));
+ memset(&task, 0, sizeof(task));
+
+ sess.MaxBurstLength = SPDK_ISCSI_MAX_BURST_LENGTH;
+
+ conn.sess = &sess;
+ conn.MaxRecvDataSegmentLength = 8192;
+
+ pdu = spdk_get_pdu();
+ SPDK_CU_ASSERT_FATAL(pdu != NULL);
+
+ scsi_req = (struct iscsi_bhs_scsi_req *)&pdu->bhs;
+ scsi_req->read_bit = 1;
+
+ spdk_iscsi_task_set_pdu(&task, pdu);
+ task.parent = NULL;
+
+ task.scsi.length = 512;
+ task.scsi.transfer_len = 512;
+ task.bytes_completed = 512;
+ task.scsi.data_transferred = 0;
+ task.scsi.status = SPDK_SCSI_STATUS_GOOD;
+
+ spdk_iscsi_task_response(&conn, &task);
+ spdk_put_pdu(pdu);
+
+ /*
+ * In this case, only a SCSI Response PDU is expected and
+ * underflow must be set in it.
+ * */
+ to_be32(&residual_count, 512);
+
+ pdu = TAILQ_FIRST(&g_write_pdu_list);
+ SPDK_CU_ASSERT_FATAL(pdu != NULL);
+
+ CU_ASSERT(pdu->bhs.opcode == ISCSI_OP_SCSI_RSP);
+
+ resph = (struct iscsi_bhs_scsi_resp *)&pdu->bhs;
+
+ CU_ASSERT(resph->flags == (ISCSI_SCSI_UNDERFLOW | 0x80));
+
+ data_segment_len = DGET24(resph->data_segment_len);
+ CU_ASSERT(data_segment_len == 0);
+ CU_ASSERT(resph->res_cnt == residual_count);
+
+ TAILQ_REMOVE(&g_write_pdu_list, pdu, tailq);
+ spdk_put_pdu(pdu);
+
+ CU_ASSERT(TAILQ_EMPTY(&g_write_pdu_list));
+}
+
+static void
+underflow_for_request_sense_test(void)
+{
+ struct spdk_iscsi_sess sess;
+ struct spdk_iscsi_conn conn;
+ struct spdk_iscsi_task task;
+ struct spdk_iscsi_pdu *pdu1, *pdu2;
+ struct iscsi_bhs_scsi_req *scsi_req;
+ struct iscsi_bhs_data_in *datah;
+ struct iscsi_bhs_scsi_resp *resph;
+ uint32_t residual_count = 0, data_segment_len;
+
+ TAILQ_INIT(&g_write_pdu_list);
+
+ memset(&sess, 0, sizeof(sess));
+ memset(&conn, 0, sizeof(conn));
+ memset(&task, 0, sizeof(task));
+
+ sess.MaxBurstLength = SPDK_ISCSI_MAX_BURST_LENGTH;
+
+ conn.sess = &sess;
+ conn.MaxRecvDataSegmentLength = 8192;
+
+ pdu1 = spdk_get_pdu();
+ SPDK_CU_ASSERT_FATAL(pdu1 != NULL);
+
+ scsi_req = (struct iscsi_bhs_scsi_req *)&pdu1->bhs;
+ scsi_req->read_bit = 1;
+
+ spdk_iscsi_task_set_pdu(&task, pdu1);
+ task.parent = NULL;
+
+ task.scsi.iovs = &task.scsi.iov;
+ task.scsi.iovcnt = 1;
+ task.scsi.length = 512;
+ task.scsi.transfer_len = 512;
+ task.bytes_completed = 512;
+
+ task.scsi.sense_data_len = 18;
+ task.scsi.data_transferred = 18;
+ task.scsi.status = SPDK_SCSI_STATUS_GOOD;
+
+ spdk_iscsi_task_response(&conn, &task);
+ spdk_put_pdu(pdu1);
+
+ /*
+ * In this case, a SCSI Data-In PDU and a SCSI Response PDU are returned.
+ * Sense data are set both in payload and sense area.
+ * The SCSI Data-In PDU sets FINAL and the SCSI Response PDU sets UNDERFLOW.
+ *
+ * Probably there will be different implementation but keeping current SPDK
+ * implementation by adding UT will be valuable for any implementation.
+ */
+ to_be32(&residual_count, 494);
+
+ pdu1 = TAILQ_FIRST(&g_write_pdu_list);
+ SPDK_CU_ASSERT_FATAL(pdu1 != NULL);
+
+ CU_ASSERT(pdu1->bhs.opcode == ISCSI_OP_SCSI_DATAIN);
+
+ datah = (struct iscsi_bhs_data_in *)&pdu1->bhs;
+
+ CU_ASSERT(datah->flags == ISCSI_FLAG_FINAL);
+
+ data_segment_len = DGET24(datah->data_segment_len);
+ CU_ASSERT(data_segment_len == 18);
+ CU_ASSERT(datah->res_cnt == 0);
+
+ TAILQ_REMOVE(&g_write_pdu_list, pdu1, tailq);
+ spdk_put_pdu(pdu1);
+
+ pdu2 = TAILQ_FIRST(&g_write_pdu_list);
+ /* inform scan-build (clang 6) that these pointers are not the same */
+ SPDK_CU_ASSERT_FATAL(pdu1 != pdu2);
+ SPDK_CU_ASSERT_FATAL(pdu2 != NULL);
+
+ CU_ASSERT(pdu2->bhs.opcode == ISCSI_OP_SCSI_RSP);
+
+ resph = (struct iscsi_bhs_scsi_resp *)&pdu2->bhs;
+
+ CU_ASSERT(resph->flags == (ISCSI_SCSI_UNDERFLOW | 0x80));
+
+ data_segment_len = DGET24(resph->data_segment_len);
+ CU_ASSERT(data_segment_len == task.scsi.sense_data_len + 2);
+ CU_ASSERT(resph->res_cnt == residual_count);
+
+ TAILQ_REMOVE(&g_write_pdu_list, pdu2, tailq);
+ spdk_put_pdu(pdu2);
+
+ CU_ASSERT(TAILQ_EMPTY(&g_write_pdu_list));
+}
+
+static void
+underflow_for_check_condition_test(void)
+{
+ struct spdk_iscsi_sess sess;
+ struct spdk_iscsi_conn conn;
+ struct spdk_iscsi_task task;
+ struct spdk_iscsi_pdu *pdu;
+ struct iscsi_bhs_scsi_req *scsi_req;
+ struct iscsi_bhs_scsi_resp *resph;
+ uint32_t data_segment_len;
+
+ TAILQ_INIT(&g_write_pdu_list);
+
+ memset(&sess, 0, sizeof(sess));
+ memset(&conn, 0, sizeof(conn));
+ memset(&task, 0, sizeof(task));
+
+ sess.MaxBurstLength = SPDK_ISCSI_MAX_BURST_LENGTH;
+
+ conn.sess = &sess;
+ conn.MaxRecvDataSegmentLength = 8192;
+
+ pdu = spdk_get_pdu();
+ SPDK_CU_ASSERT_FATAL(pdu != NULL);
+
+ scsi_req = (struct iscsi_bhs_scsi_req *)&pdu->bhs;
+ scsi_req->read_bit = 1;
+
+ spdk_iscsi_task_set_pdu(&task, pdu);
+ task.parent = NULL;
+
+ task.scsi.iovs = &task.scsi.iov;
+ task.scsi.iovcnt = 1;
+ task.scsi.length = 512;
+ task.scsi.transfer_len = 512;
+ task.bytes_completed = 512;
+
+ task.scsi.sense_data_len = 18;
+ task.scsi.data_transferred = 18;
+ task.scsi.status = SPDK_SCSI_STATUS_CHECK_CONDITION;
+
+ spdk_iscsi_task_response(&conn, &task);
+ spdk_put_pdu(pdu);
+
+ /*
+ * In this case, a SCSI Response PDU is returned.
+ * Sense data is set in sense area.
+ * Underflow is not set.
+ */
+ pdu = TAILQ_FIRST(&g_write_pdu_list);
+ SPDK_CU_ASSERT_FATAL(pdu != NULL);
+
+ CU_ASSERT(pdu->bhs.opcode == ISCSI_OP_SCSI_RSP);
+
+ resph = (struct iscsi_bhs_scsi_resp *)&pdu->bhs;
+
+ CU_ASSERT(resph->flags == 0x80);
+
+ data_segment_len = DGET24(resph->data_segment_len);
+ CU_ASSERT(data_segment_len == task.scsi.sense_data_len + 2);
+ CU_ASSERT(resph->res_cnt == 0);
+
+ TAILQ_REMOVE(&g_write_pdu_list, pdu, tailq);
+ spdk_put_pdu(pdu);
+
+ CU_ASSERT(TAILQ_EMPTY(&g_write_pdu_list));
+}
+
+static void
+add_transfer_task_test(void)
+{
+ struct spdk_iscsi_sess sess;
+ struct spdk_iscsi_conn conn;
+ struct spdk_iscsi_task task;
+ struct spdk_iscsi_pdu *pdu, *tmp;
+ struct iscsi_bhs_r2t *r2th;
+ int rc, count = 0;
+ uint32_t buffer_offset, desired_xfer_len;
+
+ memset(&sess, 0, sizeof(sess));
+ memset(&conn, 0, sizeof(conn));
+ memset(&task, 0, sizeof(task));
+
+ sess.MaxBurstLength = SPDK_ISCSI_MAX_BURST_LENGTH; /* 1M */
+ sess.MaxOutstandingR2T = DEFAULT_MAXR2T; /* 4 */
+
+ conn.sess = &sess;
+ TAILQ_INIT(&conn.queued_r2t_tasks);
+ TAILQ_INIT(&conn.active_r2t_tasks);
+
+ pdu = spdk_get_pdu();
+ SPDK_CU_ASSERT_FATAL(pdu != NULL);
+
+ pdu->data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH; /* 64K */
+ task.scsi.transfer_len = 16 * 1024 * 1024;
+ spdk_iscsi_task_set_pdu(&task, pdu);
+
+ /* The following tests if the task is queued because R2T tasks are full. */
+ conn.pending_r2t = DEFAULT_MAXR2T;
+
+ rc = spdk_add_transfer_task(&conn, &task);
+
+ CU_ASSERT(rc == SPDK_SUCCESS);
+ CU_ASSERT(TAILQ_FIRST(&conn.queued_r2t_tasks) == &task);
+
+ TAILQ_REMOVE(&conn.queued_r2t_tasks, &task, link);
+ CU_ASSERT(TAILQ_EMPTY(&conn.queued_r2t_tasks));
+
+ /* The following tests if multiple R2Ts are issued. */
+ conn.pending_r2t = 0;
+
+ rc = spdk_add_transfer_task(&conn, &task);
+
+ CU_ASSERT(rc == SPDK_SUCCESS);
+ CU_ASSERT(TAILQ_FIRST(&conn.active_r2t_tasks) == &task);
+
+ TAILQ_REMOVE(&conn.active_r2t_tasks, &task, link);
+ CU_ASSERT(TAILQ_EMPTY(&conn.active_r2t_tasks));
+
+ CU_ASSERT(conn.data_out_cnt == 255);
+ CU_ASSERT(conn.pending_r2t == 1);
+ CU_ASSERT(conn.outstanding_r2t_tasks[0] == &task);
+ CU_ASSERT(conn.ttt == 1);
+
+ CU_ASSERT(task.data_out_cnt == 255);
+ CU_ASSERT(task.ttt == 1);
+ CU_ASSERT(task.outstanding_r2t == sess.MaxOutstandingR2T);
+ CU_ASSERT(task.next_r2t_offset ==
+ pdu->data_segment_len + sess.MaxBurstLength * sess.MaxOutstandingR2T);
+
+
+ while (!TAILQ_EMPTY(&g_write_pdu_list)) {
+ tmp = TAILQ_FIRST(&g_write_pdu_list);
+ TAILQ_REMOVE(&g_write_pdu_list, tmp, tailq);
+
+ r2th = (struct iscsi_bhs_r2t *)&tmp->bhs;
+
+ buffer_offset = from_be32(&r2th->buffer_offset);
+ CU_ASSERT(buffer_offset == pdu->data_segment_len + sess.MaxBurstLength * count);
+
+ desired_xfer_len = from_be32(&r2th->desired_xfer_len);
+ CU_ASSERT(desired_xfer_len == sess.MaxBurstLength);
+
+ spdk_put_pdu(tmp);
+ count++;
+ }
+
+ CU_ASSERT(count == DEFAULT_MAXR2T);
+
+ spdk_put_pdu(pdu);
+}
+
+static void
+get_transfer_task_test(void)
+{
+ struct spdk_iscsi_sess sess;
+ struct spdk_iscsi_conn conn;
+ struct spdk_iscsi_task task1, task2, *task;
+ struct spdk_iscsi_pdu *pdu1, *pdu2, *pdu;
+ int rc;
+
+ memset(&sess, 0, sizeof(sess));
+ memset(&conn, 0, sizeof(conn));
+ memset(&task1, 0, sizeof(task1));
+ memset(&task2, 0, sizeof(task2));
+
+ sess.MaxBurstLength = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ sess.MaxOutstandingR2T = 1;
+
+ conn.sess = &sess;
+ TAILQ_INIT(&conn.active_r2t_tasks);
+
+ pdu1 = spdk_get_pdu();
+ SPDK_CU_ASSERT_FATAL(pdu1 != NULL);
+
+ pdu1->data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ task1.scsi.transfer_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ spdk_iscsi_task_set_pdu(&task1, pdu1);
+
+ rc = spdk_add_transfer_task(&conn, &task1);
+ CU_ASSERT(rc == SPDK_SUCCESS);
+
+ pdu2 = spdk_get_pdu();
+ SPDK_CU_ASSERT_FATAL(pdu2 != NULL);
+
+ pdu2->data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ task2.scsi.transfer_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ spdk_iscsi_task_set_pdu(&task2, pdu2);
+
+ rc = spdk_add_transfer_task(&conn, &task2);
+ CU_ASSERT(rc == SPDK_SUCCESS);
+
+ task = spdk_get_transfer_task(&conn, 1);
+ CU_ASSERT(task == &task1);
+
+ task = spdk_get_transfer_task(&conn, 2);
+ CU_ASSERT(task == &task2);
+
+ while (!TAILQ_EMPTY(&conn.active_r2t_tasks)) {
+ task = TAILQ_FIRST(&conn.active_r2t_tasks);
+ TAILQ_REMOVE(&conn.active_r2t_tasks, task, link);
+ }
+
+ while (!TAILQ_EMPTY(&g_write_pdu_list)) {
+ pdu = TAILQ_FIRST(&g_write_pdu_list);
+ TAILQ_REMOVE(&g_write_pdu_list, pdu, tailq);
+ spdk_put_pdu(pdu);
+ }
+
+ spdk_put_pdu(pdu2);
+ spdk_put_pdu(pdu1);
+}
+
+static void
+del_transfer_task_test(void)
+{
+ struct spdk_iscsi_sess sess;
+ struct spdk_iscsi_conn conn;
+ struct spdk_iscsi_task task1, task2, task3, task4, task5, *task;
+ struct spdk_iscsi_pdu *pdu1, *pdu2, *pdu3, *pdu4, *pdu5, *pdu;
+ int rc;
+
+ memset(&sess, 0, sizeof(sess));
+ memset(&conn, 0, sizeof(conn));
+ memset(&task1, 0, sizeof(task1));
+ memset(&task2, 0, sizeof(task2));
+ memset(&task3, 0, sizeof(task3));
+ memset(&task4, 0, sizeof(task4));
+ memset(&task5, 0, sizeof(task5));
+
+ sess.MaxBurstLength = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ sess.MaxOutstandingR2T = 1;
+
+ conn.sess = &sess;
+ TAILQ_INIT(&conn.active_r2t_tasks);
+ TAILQ_INIT(&conn.queued_r2t_tasks);
+
+ pdu1 = spdk_get_pdu();
+ SPDK_CU_ASSERT_FATAL(pdu1 != NULL);
+
+ pdu1->data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ task1.scsi.transfer_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ spdk_iscsi_task_set_pdu(&task1, pdu1);
+ task1.tag = 11;
+
+ rc = spdk_add_transfer_task(&conn, &task1);
+ CU_ASSERT(rc == SPDK_SUCCESS);
+
+ pdu2 = spdk_get_pdu();
+ SPDK_CU_ASSERT_FATAL(pdu2 != NULL);
+
+ pdu2->data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ task2.scsi.transfer_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ spdk_iscsi_task_set_pdu(&task2, pdu2);
+ task2.tag = 12;
+
+ rc = spdk_add_transfer_task(&conn, &task2);
+ CU_ASSERT(rc == SPDK_SUCCESS);
+
+ pdu3 = spdk_get_pdu();
+ SPDK_CU_ASSERT_FATAL(pdu3 != NULL);
+
+ pdu3->data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ task3.scsi.transfer_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ spdk_iscsi_task_set_pdu(&task3, pdu3);
+ task3.tag = 13;
+
+ rc = spdk_add_transfer_task(&conn, &task3);
+ CU_ASSERT(rc == SPDK_SUCCESS);
+
+ pdu4 = spdk_get_pdu();
+ SPDK_CU_ASSERT_FATAL(pdu4 != NULL);
+
+ pdu4->data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ task4.scsi.transfer_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ spdk_iscsi_task_set_pdu(&task4, pdu4);
+ task4.tag = 14;
+
+ rc = spdk_add_transfer_task(&conn, &task4);
+ CU_ASSERT(rc == SPDK_SUCCESS);
+
+ pdu5 = spdk_get_pdu();
+ SPDK_CU_ASSERT_FATAL(pdu5 != NULL);
+
+ pdu5->data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ task5.scsi.transfer_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ spdk_iscsi_task_set_pdu(&task5, pdu5);
+ task5.tag = 15;
+
+ rc = spdk_add_transfer_task(&conn, &task5);
+ CU_ASSERT(rc == SPDK_SUCCESS);
+
+ CU_ASSERT(spdk_get_transfer_task(&conn, 1) == &task1);
+ CU_ASSERT(spdk_get_transfer_task(&conn, 5) == NULL);
+ spdk_del_transfer_task(&conn, 11);
+ CU_ASSERT(spdk_get_transfer_task(&conn, 1) == NULL);
+ CU_ASSERT(spdk_get_transfer_task(&conn, 5) == &task5);
+
+ CU_ASSERT(spdk_get_transfer_task(&conn, 2) == &task2);
+ spdk_del_transfer_task(&conn, 12);
+ CU_ASSERT(spdk_get_transfer_task(&conn, 2) == NULL);
+
+ CU_ASSERT(spdk_get_transfer_task(&conn, 3) == &task3);
+ spdk_del_transfer_task(&conn, 13);
+ CU_ASSERT(spdk_get_transfer_task(&conn, 3) == NULL);
+
+ CU_ASSERT(spdk_get_transfer_task(&conn, 4) == &task4);
+ spdk_del_transfer_task(&conn, 14);
+ CU_ASSERT(spdk_get_transfer_task(&conn, 4) == NULL);
+
+ CU_ASSERT(spdk_get_transfer_task(&conn, 5) == &task5);
+ spdk_del_transfer_task(&conn, 15);
+ CU_ASSERT(spdk_get_transfer_task(&conn, 5) == NULL);
+
+ while (!TAILQ_EMPTY(&conn.active_r2t_tasks)) {
+ task = TAILQ_FIRST(&conn.active_r2t_tasks);
+ TAILQ_REMOVE(&conn.active_r2t_tasks, task, link);
+ }
+
+ while (!TAILQ_EMPTY(&g_write_pdu_list)) {
+ pdu = TAILQ_FIRST(&g_write_pdu_list);
+ TAILQ_REMOVE(&g_write_pdu_list, pdu, tailq);
+ spdk_put_pdu(pdu);
+ }
+
+ spdk_put_pdu(pdu5);
+ spdk_put_pdu(pdu4);
+ spdk_put_pdu(pdu3);
+ spdk_put_pdu(pdu2);
+ spdk_put_pdu(pdu1);
+}
+
+static void
+clear_all_transfer_tasks_test(void)
+{
+ struct spdk_iscsi_sess sess;
+ struct spdk_iscsi_conn conn;
+ struct spdk_iscsi_task *task1, *task2, *task3, *task4, *task5;
+ struct spdk_iscsi_pdu *pdu1, *pdu2, *pdu3, *pdu4, *pdu5, *pdu;
+ struct spdk_scsi_lun lun1, lun2;
+ int rc;
+
+ memset(&sess, 0, sizeof(sess));
+ memset(&conn, 0, sizeof(conn));
+ memset(&lun1, 0, sizeof(lun1));
+ memset(&lun2, 0, sizeof(lun2));
+
+ sess.MaxBurstLength = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ sess.MaxOutstandingR2T = 1;
+
+ conn.sess = &sess;
+ TAILQ_INIT(&conn.active_r2t_tasks);
+ TAILQ_INIT(&conn.queued_r2t_tasks);
+
+ task1 = spdk_iscsi_task_get(&conn, NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(task1 != NULL);
+ pdu1 = spdk_get_pdu();
+ SPDK_CU_ASSERT_FATAL(pdu1 != NULL);
+
+ pdu1->data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ task1->scsi.transfer_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ task1->scsi.lun = &lun1;
+ spdk_iscsi_task_set_pdu(task1, pdu1);
+
+ rc = spdk_add_transfer_task(&conn, task1);
+ CU_ASSERT(rc == SPDK_SUCCESS);
+
+ task2 = spdk_iscsi_task_get(&conn, NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(task2 != NULL);
+ pdu2 = spdk_get_pdu();
+ SPDK_CU_ASSERT_FATAL(pdu2 != NULL);
+
+ pdu2->data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ task2->scsi.transfer_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ task2->scsi.lun = &lun1;
+ spdk_iscsi_task_set_pdu(task2, pdu2);
+
+ rc = spdk_add_transfer_task(&conn, task2);
+ CU_ASSERT(rc == SPDK_SUCCESS);
+
+ task3 = spdk_iscsi_task_get(&conn, NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(task3 != NULL);
+ pdu3 = spdk_get_pdu();
+ SPDK_CU_ASSERT_FATAL(pdu3 != NULL);
+
+ pdu3->data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ task3->scsi.transfer_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ task3->scsi.lun = &lun1;
+ spdk_iscsi_task_set_pdu(task3, pdu3);
+
+ rc = spdk_add_transfer_task(&conn, task3);
+ CU_ASSERT(rc == SPDK_SUCCESS);
+
+ task4 = spdk_iscsi_task_get(&conn, NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(task4 != NULL);
+ pdu4 = spdk_get_pdu();
+ SPDK_CU_ASSERT_FATAL(pdu4 != NULL);
+
+ pdu4->data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ task4->scsi.transfer_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ task4->scsi.lun = &lun2;
+ spdk_iscsi_task_set_pdu(task4, pdu4);
+
+ rc = spdk_add_transfer_task(&conn, task4);
+ CU_ASSERT(rc == SPDK_SUCCESS);
+
+ task5 = spdk_iscsi_task_get(&conn, NULL, NULL);
+ SPDK_CU_ASSERT_FATAL(task5 != NULL);
+ pdu5 = spdk_get_pdu();
+ SPDK_CU_ASSERT_FATAL(pdu5 != NULL);
+
+ pdu5->data_segment_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ task5->scsi.transfer_len = SPDK_ISCSI_MAX_RECV_DATA_SEGMENT_LENGTH;
+ task5->scsi.lun = &lun2;
+ spdk_iscsi_task_set_pdu(task5, pdu5);
+
+ rc = spdk_add_transfer_task(&conn, task5);
+ CU_ASSERT(rc == SPDK_SUCCESS);
+
+ CU_ASSERT(conn.ttt == 4);
+
+ CU_ASSERT(spdk_get_transfer_task(&conn, 1) == task1);
+ CU_ASSERT(spdk_get_transfer_task(&conn, 2) == task2);
+ CU_ASSERT(spdk_get_transfer_task(&conn, 3) == task3);
+ CU_ASSERT(spdk_get_transfer_task(&conn, 4) == task4);
+ CU_ASSERT(spdk_get_transfer_task(&conn, 5) == NULL);
+
+ spdk_clear_all_transfer_task(&conn, &lun1);
+
+ CU_ASSERT(TAILQ_EMPTY(&conn.queued_r2t_tasks));
+ CU_ASSERT(spdk_get_transfer_task(&conn, 1) == NULL);
+ CU_ASSERT(spdk_get_transfer_task(&conn, 2) == NULL);
+ CU_ASSERT(spdk_get_transfer_task(&conn, 3) == NULL);
+ CU_ASSERT(spdk_get_transfer_task(&conn, 4) == task4);
+ CU_ASSERT(spdk_get_transfer_task(&conn, 5) == task5);
+
+ spdk_clear_all_transfer_task(&conn, NULL);
+
+ CU_ASSERT(spdk_get_transfer_task(&conn, 4) == NULL);
+ CU_ASSERT(spdk_get_transfer_task(&conn, 5) == NULL);
+
+ CU_ASSERT(TAILQ_EMPTY(&conn.active_r2t_tasks));
+ while (!TAILQ_EMPTY(&g_write_pdu_list)) {
+ pdu = TAILQ_FIRST(&g_write_pdu_list);
+ TAILQ_REMOVE(&g_write_pdu_list, pdu, tailq);
+ spdk_put_pdu(pdu);
+ }
+
+ spdk_put_pdu(pdu5);
+ spdk_put_pdu(pdu4);
+ spdk_put_pdu(pdu3);
+ spdk_put_pdu(pdu2);
+ spdk_put_pdu(pdu1);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("iscsi_suite", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "login check target test", op_login_check_target_test) == NULL
+ || CU_add_test(suite, "maxburstlength test", maxburstlength_test) == NULL
+ || CU_add_test(suite, "underflow for read transfer test",
+ underflow_for_read_transfer_test) == NULL
+ || CU_add_test(suite, "underflow for zero read transfer test",
+ underflow_for_zero_read_transfer_test) == NULL
+ || CU_add_test(suite, "underflow for request sense test",
+ underflow_for_request_sense_test) == NULL
+ || CU_add_test(suite, "underflow for check condition test",
+ underflow_for_check_condition_test) == NULL
+ || CU_add_test(suite, "add transfer task test", add_transfer_task_test) == NULL
+ || CU_add_test(suite, "get transfer task test", get_transfer_task_test) == NULL
+ || CU_add_test(suite, "del transfer task test", del_transfer_task_test) == NULL
+ || CU_add_test(suite, "clear all transfer tasks test",
+ clear_all_transfer_tasks_test) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/iscsi/param.c/.gitignore b/src/spdk/test/unit/lib/iscsi/param.c/.gitignore
new file mode 100644
index 00000000..26992146
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/param.c/.gitignore
@@ -0,0 +1 @@
+param_ut
diff --git a/src/spdk/test/unit/lib/iscsi/param.c/Makefile b/src/spdk/test/unit/lib/iscsi/param.c/Makefile
new file mode 100644
index 00000000..bc944ae9
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/param.c/Makefile
@@ -0,0 +1,40 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+
+TEST_FILE = param_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/iscsi/param.c/param_ut.c b/src/spdk/test/unit/lib/iscsi/param.c/param_ut.c
new file mode 100644
index 00000000..40941923
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/param.c/param_ut.c
@@ -0,0 +1,397 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk/scsi.h"
+
+#include "spdk_cunit.h"
+
+#include "../common.c"
+#include "iscsi/param.c"
+
+struct spdk_iscsi_globals g_spdk_iscsi;
+
+struct spdk_iscsi_tgt_node *
+spdk_iscsi_find_tgt_node(const char *target_name)
+{
+ return NULL;
+}
+
+bool
+spdk_iscsi_tgt_node_access(struct spdk_iscsi_conn *conn,
+ struct spdk_iscsi_tgt_node *target,
+ const char *iqn, const char *addr)
+{
+ return false;
+}
+
+int
+spdk_iscsi_send_tgts(struct spdk_iscsi_conn *conn, const char *iiqn,
+ const char *iaddr,
+ const char *tiqn, uint8_t *data, int alloc_len, int data_len)
+{
+ return 0;
+}
+
+static void
+burst_length_param_negotation(int FirstBurstLength, int MaxBurstLength,
+ int initialR2T)
+{
+ struct spdk_iscsi_sess sess;
+ struct spdk_iscsi_conn conn;
+ struct iscsi_param *params;
+ struct iscsi_param **params_p;
+ char data[8192];
+ int rc;
+ int total, len;
+
+ total = 0;
+ params = NULL;
+ params_p = &params;
+
+ memset(&sess, 0, sizeof(sess));
+ memset(&conn, 0, sizeof(conn));
+ memset(data, 0, 8192);
+
+ sess.ExpCmdSN = 0;
+ sess.MaxCmdSN = 64;
+ sess.session_type = SESSION_TYPE_NORMAL;
+ sess.params = NULL;
+ sess.MaxBurstLength = 65536;
+ sess.InitialR2T = true;
+ sess.FirstBurstLength = SPDK_ISCSI_FIRST_BURST_LENGTH;
+ sess.MaxOutstandingR2T = 1;
+
+ /* set default params */
+ rc = spdk_iscsi_sess_params_init(&sess.params);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_iscsi_param_set_int(sess.params, "FirstBurstLength",
+ sess.FirstBurstLength);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_iscsi_param_set_int(sess.params, "MaxBurstLength",
+ sess.MaxBurstLength);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_iscsi_param_set(sess.params, "InitialR2T",
+ sess.InitialR2T ? "Yes" : "No");
+ CU_ASSERT(rc == 0);
+
+ conn.full_feature = 1;
+ conn.sess = &sess;
+ conn.MaxRecvDataSegmentLength = 65536;
+
+ rc = spdk_iscsi_conn_params_init(&conn.params);
+ CU_ASSERT(rc == 0);
+
+ /* construct the data */
+ len = snprintf(data + total, 8192 - total, "%s=%d",
+ "FirstBurstLength", FirstBurstLength);
+ total += len + 1;
+
+ len = snprintf(data + total, 8192 - total, "%s=%d",
+ "MaxBurstLength", MaxBurstLength);
+ total += len + 1;
+
+ len = snprintf(data + total, 8192 - total, "%s=%d",
+ "InitialR2T", initialR2T);
+ total += len + 1;
+
+ /* add one extra NUL byte at the end to match real iSCSI params */
+ total++;
+
+ /* store incoming parameters */
+ rc = spdk_iscsi_parse_params(params_p, data, total, false, NULL);
+ CU_ASSERT(rc == 0);
+
+ /* negotiate parameters */
+ rc = spdk_iscsi_negotiate_params(&conn, params_p,
+ data, 8192, rc);
+ CU_ASSERT(rc > 0);
+
+ rc = spdk_iscsi_copy_param2var(&conn);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(conn.sess->FirstBurstLength <= SPDK_ISCSI_FIRST_BURST_LENGTH);
+ CU_ASSERT(conn.sess->FirstBurstLength <= conn.sess->MaxBurstLength);
+ CU_ASSERT(conn.sess->MaxBurstLength <= SPDK_ISCSI_MAX_BURST_LENGTH);
+ CU_ASSERT(conn.sess->MaxOutstandingR2T == 1);
+
+ spdk_iscsi_param_free(sess.params);
+ spdk_iscsi_param_free(conn.params);
+ spdk_iscsi_param_free(*params_p);
+}
+
+static void
+param_negotiation_test(void)
+{
+ burst_length_param_negotation(8192, 16384, 0);
+ burst_length_param_negotation(8192, 16384, 1);
+ burst_length_param_negotation(8192, 1024, 1);
+ burst_length_param_negotation(8192, 1024, 0);
+ burst_length_param_negotation(512, 1024, 1);
+ burst_length_param_negotation(512, 1024, 0);
+}
+
+static void
+list_negotiation_test(void)
+{
+ int add_param_value = 0;
+ struct iscsi_param param = {};
+ char *new_val;
+ char valid_list_buf[1024];
+ char in_val_buf[1024];
+
+#define TEST_LIST(valid_list, in_val, expected_result) \
+ do { \
+ snprintf(valid_list_buf, sizeof(valid_list_buf), "%s", valid_list); \
+ snprintf(in_val_buf, sizeof(in_val_buf), "%s", in_val); \
+ new_val = spdk_iscsi_negotiate_param_list(&add_param_value, &param, valid_list_buf, in_val_buf, NULL); \
+ if (expected_result) { \
+ SPDK_CU_ASSERT_FATAL(new_val != NULL); \
+ CU_ASSERT_STRING_EQUAL(new_val, expected_result); \
+ } \
+ } while (0)
+
+ TEST_LIST("None", "None", "None");
+ TEST_LIST("CHAP,None", "None", "None");
+ TEST_LIST("CHAP,None", "CHAP", "CHAP");
+ TEST_LIST("KRB5,SRP,CHAP,None", "SRP,CHAP,None", "SRP");
+ TEST_LIST("KRB5,SRP,CHAP,None", "CHAP,SRP,None", "CHAP");
+ TEST_LIST("KRB5,SRP,CHAP,None", "SPKM1,SRP,CHAP,None", "SRP");
+ TEST_LIST("KRB5,SRP,None", "CHAP,None", "None");
+}
+
+#define PARSE(strconst, partial_enabled, partial_text) \
+ data = strconst; \
+ len = sizeof(strconst); \
+ rc = spdk_iscsi_parse_params(&params, data, len, partial_enabled, partial_text)
+
+#define EXPECT_VAL(key, expected_value) \
+ { \
+ const char *val = spdk_iscsi_param_get_val(params, key); \
+ CU_ASSERT(val != NULL); \
+ if (val != NULL) { \
+ CU_ASSERT(strcmp(val, expected_value) == 0); \
+ } \
+ }
+
+#define EXPECT_NULL(key) \
+ CU_ASSERT(spdk_iscsi_param_get_val(params, key) == NULL)
+
+static void
+parse_valid_test(void)
+{
+ struct iscsi_param *params = NULL;
+ int rc;
+ char *data;
+ int len;
+ char *partial_parameter = NULL;
+
+ /* simple test with a single key=value */
+ PARSE("Abc=def\0", false, NULL);
+ CU_ASSERT(rc == 0);
+ EXPECT_VAL("Abc", "def");
+
+ /* multiple key=value pairs */
+ PARSE("Aaa=bbbbbb\0Xyz=test\0", false, NULL);
+ CU_ASSERT(rc == 0);
+ EXPECT_VAL("Aaa", "bbbbbb");
+ EXPECT_VAL("Xyz", "test");
+
+ /* value with embedded '=' */
+ PARSE("A=b=c\0", false, NULL);
+ CU_ASSERT(rc == 0);
+ EXPECT_VAL("A", "b=c");
+
+ /* CHAP_C=AAAA.... with value length 8192 */
+ len = strlen("CHAP_C=") + ISCSI_TEXT_MAX_VAL_LEN + 1/* null terminators */;
+ data = malloc(len);
+ SPDK_CU_ASSERT_FATAL(data != NULL);
+ memset(data, 'A', len);
+ memcpy(data, "CHAP_C", 6);
+ data[6] = '=';
+ data[len - 1] = '\0';
+ rc = spdk_iscsi_parse_params(&params, data, len, false, NULL);
+ CU_ASSERT(rc == 0);
+ free(data);
+
+ /* partial parameter: value is partial */
+ PARSE("C=AAA\0D=B", true, &partial_parameter);
+ SPDK_CU_ASSERT_FATAL(partial_parameter != NULL);
+ CU_ASSERT_STRING_EQUAL(partial_parameter, "D=B");
+ CU_ASSERT(rc == 0);
+ EXPECT_VAL("C", "AAA");
+ EXPECT_NULL("D");
+ PARSE("XXXX\0E=UUUU\0", false, &partial_parameter);
+ CU_ASSERT(rc == 0);
+ EXPECT_VAL("D", "BXXXX");
+ EXPECT_VAL("E", "UUUU");
+ CU_ASSERT_PTR_NULL(partial_parameter);
+
+ /* partial parameter: key is partial */
+ PARSE("IAMAFAK", true, &partial_parameter);
+ CU_ASSERT_STRING_EQUAL(partial_parameter, "IAMAFAK");
+ CU_ASSERT(rc == 0);
+ EXPECT_NULL("IAMAFAK");
+ PARSE("EDKEY=TTTT\0F=IIII", false, &partial_parameter);
+ CU_ASSERT(rc == 0);
+ EXPECT_VAL("IAMAFAKEDKEY", "TTTT");
+ EXPECT_VAL("F", "IIII");
+ CU_ASSERT_PTR_NULL(partial_parameter);
+
+ /* Second partial parameter is the only parameter */
+ PARSE("OOOO", true, &partial_parameter);
+ CU_ASSERT_STRING_EQUAL(partial_parameter, "OOOO");
+ CU_ASSERT(rc == 0);
+ EXPECT_NULL("OOOO");
+ PARSE("LL=MMMM", false, &partial_parameter);
+ CU_ASSERT(rc == 0);
+ EXPECT_VAL("OOOOLL", "MMMM");
+ CU_ASSERT_PTR_NULL(partial_parameter);
+
+ spdk_iscsi_param_free(params);
+}
+
+static void
+parse_invalid_test(void)
+{
+ struct iscsi_param *params = NULL;
+ int rc;
+ char *data;
+ int len;
+
+ /* key without '=' */
+ PARSE("Abc\0", false, NULL);
+ CU_ASSERT(rc != 0);
+ EXPECT_NULL("Abc");
+
+ /* multiple key=value pairs, one missing '=' */
+ PARSE("Abc=def\0Xyz\0Www=test\0", false, NULL);
+ CU_ASSERT(rc != 0);
+ EXPECT_VAL("Abc", "def");
+ EXPECT_NULL("Xyz");
+ EXPECT_NULL("Www");
+
+ /* empty key */
+ PARSE("=abcdef", false, NULL);
+ CU_ASSERT(rc != 0);
+ EXPECT_NULL("");
+
+ /* CHAP_C=AAAA.... with value length 8192 + 1 */
+ len = strlen("CHAP_C=") + ISCSI_TEXT_MAX_VAL_LEN + 1 /* max value len + 1 */ +
+ 1 /* null terminators */;
+ data = malloc(len);
+ SPDK_CU_ASSERT_FATAL(data != NULL);
+ memset(data, 'A', len);
+ memcpy(data, "CHAP_C", 6);
+ data[6] = '=';
+ data[len - 1] = '\0';
+ rc = spdk_iscsi_parse_params(&params, data, len, false, NULL);
+ free(data);
+ CU_ASSERT(rc != 0);
+ EXPECT_NULL("CHAP_C");
+
+ /* Test simple value, length of value bigger than 255 */
+ len = strlen("A=") + ISCSI_TEXT_MAX_SIMPLE_VAL_LEN + 1 /* max simple value len + 1 */ +
+ 1 /* null terminators */;
+ data = malloc(len);
+ SPDK_CU_ASSERT_FATAL(data != NULL);
+ memset(data, 'A', len);
+ data[1] = '=';
+ data[len - 1] = '\0';
+ rc = spdk_iscsi_parse_params(&params, data, len, false, NULL);
+ free(data);
+ CU_ASSERT(rc != 0);
+ EXPECT_NULL("A");
+
+ /* key length bigger than 63 */
+ len = ISCSI_TEXT_MAX_KEY_LEN + 1 /* max key length + 1 */ + 1 /* = */ + 1 /* A */ +
+ 1 /* null terminators */;
+ data = malloc(len);
+ SPDK_CU_ASSERT_FATAL(data != NULL);
+ memset(data, 'A', len);
+ data[64] = '=';
+ data[len - 1] = '\0';
+ rc = spdk_iscsi_parse_params(&params, data, len, false, NULL);
+ free(data);
+ CU_ASSERT(rc != 0);
+ EXPECT_NULL("A");
+
+ /* duplicated key */
+ PARSE("B=BB", false, NULL);
+ CU_ASSERT(rc == 0);
+ PARSE("B=BBBB", false, NULL);
+ CU_ASSERT(rc != 0);
+ EXPECT_VAL("B", "BB");
+
+ spdk_iscsi_param_free(params);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("iscsi_suite", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "param negotiation test",
+ param_negotiation_test) == NULL ||
+ CU_add_test(suite, "list negotiation test",
+ list_negotiation_test) == NULL ||
+ CU_add_test(suite, "parse valid test",
+ parse_valid_test) == NULL ||
+ CU_add_test(suite, "parse invalid test",
+ parse_invalid_test) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/iscsi/portal_grp.c/.gitignore b/src/spdk/test/unit/lib/iscsi/portal_grp.c/.gitignore
new file mode 100644
index 00000000..106ffebc
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/portal_grp.c/.gitignore
@@ -0,0 +1 @@
+portal_grp_ut
diff --git a/src/spdk/test/unit/lib/iscsi/portal_grp.c/Makefile b/src/spdk/test/unit/lib/iscsi/portal_grp.c/Makefile
new file mode 100644
index 00000000..ab28cabb
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/portal_grp.c/Makefile
@@ -0,0 +1,42 @@
+#
+## BSD LICENSE
+#
+## Copyright (c) Intel Corporation.
+# All rights reserved.
+# #
+# Redistribution and use in source and binary forms, with or without
+# # modification, are permitted provided that the following conditions
+# are met:
+# #
+# * Redistributions of source code must retain the above copyright
+# # notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# # notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# # distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# # contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+# #
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# #
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+
+SPDK_LIB_LIST = conf
+
+TEST_FILE = portal_grp_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/iscsi/portal_grp.c/portal_grp_ut.c b/src/spdk/test/unit/lib/iscsi/portal_grp.c/portal_grp_ut.c
new file mode 100644
index 00000000..77351f0a
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/portal_grp.c/portal_grp_ut.c
@@ -0,0 +1,477 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+#include "spdk/event.h"
+
+#include "spdk_cunit.h"
+
+#include "../common.c"
+#include "iscsi/portal_grp.c"
+#include "unit/lib/json_mock.c"
+
+struct spdk_iscsi_globals g_spdk_iscsi;
+
+static int
+test_setup(void)
+{
+ TAILQ_INIT(&g_spdk_iscsi.portal_head);
+ TAILQ_INIT(&g_spdk_iscsi.pg_head);
+ pthread_mutex_init(&g_spdk_iscsi.mutex, NULL);
+ return 0;
+}
+
+static void
+portal_create_ipv4_normal_case(void)
+{
+ struct spdk_iscsi_portal *p;
+
+ const char *host = "192.168.2.0";
+ const char *port = "3260";
+ const char *cpumask = "1";
+
+ p = spdk_iscsi_portal_create(host, port, cpumask);
+ CU_ASSERT(p != NULL);
+
+ spdk_iscsi_portal_destroy(p);
+ CU_ASSERT(TAILQ_EMPTY(&g_spdk_iscsi.portal_head));
+}
+
+static void
+portal_create_ipv6_normal_case(void)
+{
+ struct spdk_iscsi_portal *p;
+
+ const char *host = "[2001:ad6:1234::]";
+ const char *port = "3260";
+ const char *cpumask = "1";
+
+ p = spdk_iscsi_portal_create(host, port, cpumask);
+ CU_ASSERT(p != NULL);
+
+ spdk_iscsi_portal_destroy(p);
+ CU_ASSERT(TAILQ_EMPTY(&g_spdk_iscsi.portal_head));
+}
+
+static void
+portal_create_ipv4_wildcard_case(void)
+{
+ struct spdk_iscsi_portal *p;
+
+ const char *host = "*";
+ const char *port = "3260";
+ const char *cpumask = "1";
+
+ p = spdk_iscsi_portal_create(host, port, cpumask);
+ CU_ASSERT(p != NULL);
+
+ spdk_iscsi_portal_destroy(p);
+ CU_ASSERT(TAILQ_EMPTY(&g_spdk_iscsi.portal_head));
+}
+
+static void
+portal_create_ipv6_wildcard_case(void)
+{
+ struct spdk_iscsi_portal *p;
+
+ const char *host = "[*]";
+ const char *port = "3260";
+ const char *cpumask = "1";
+
+ p = spdk_iscsi_portal_create(host, port, cpumask);
+ CU_ASSERT(p != NULL);
+
+ spdk_iscsi_portal_destroy(p);
+ CU_ASSERT(TAILQ_EMPTY(&g_spdk_iscsi.portal_head));
+}
+
+static void
+portal_create_cpumask_null_case(void)
+{
+ struct spdk_iscsi_portal *p;
+
+ const char *host = "192.168.2.0";
+ const char *port = "3260";
+ const char *cpumask = NULL;
+
+ p = spdk_iscsi_portal_create(host, port, cpumask);
+ CU_ASSERT(p != NULL);
+
+ spdk_iscsi_portal_destroy(p);
+ CU_ASSERT(TAILQ_EMPTY(&g_spdk_iscsi.portal_head));
+}
+
+static void
+portal_create_cpumask_no_bit_on_case(void)
+{
+ struct spdk_iscsi_portal *p;
+
+ const char *host = "192.168.2.0";
+ const char *port = "3260";
+ const char *cpumask = "0";
+
+ p = spdk_iscsi_portal_create(host, port, cpumask);
+ CU_ASSERT(p == NULL);
+}
+
+static void
+portal_create_twice_case(void)
+{
+ struct spdk_iscsi_portal *p1, *p2;
+
+ const char *host = "192.168.2.0";
+ const char *port = "3260";
+ const char *cpumask = "1";
+
+ p1 = spdk_iscsi_portal_create(host, port, cpumask);
+ CU_ASSERT(p1 != NULL);
+
+ p2 = spdk_iscsi_portal_create(host, port, cpumask);
+ CU_ASSERT(p2 == NULL);
+
+ spdk_iscsi_portal_destroy(p1);
+ CU_ASSERT(TAILQ_EMPTY(&g_spdk_iscsi.portal_head));
+}
+
+static void
+parse_portal_ipv4_normal_case(void)
+{
+ const char *string = "192.168.2.0:3260@1";
+ const char *host_str = "192.168.2.0";
+ const char *port_str = "3260";
+ struct spdk_cpuset *cpumask_val;
+ struct spdk_iscsi_portal *p = NULL;
+ int rc;
+
+ cpumask_val = spdk_cpuset_alloc();
+ SPDK_CU_ASSERT_FATAL(cpumask_val != NULL);
+
+ spdk_cpuset_set_cpu(cpumask_val, 0, true);
+
+ rc = spdk_iscsi_parse_portal(string, &p, 0);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(p != NULL);
+ CU_ASSERT(strcmp(p->host, host_str) == 0);
+ CU_ASSERT(strcmp(p->port, port_str) == 0);
+ CU_ASSERT(spdk_cpuset_equal(p->cpumask, cpumask_val));
+
+ spdk_iscsi_portal_destroy(p);
+ CU_ASSERT(TAILQ_EMPTY(&g_spdk_iscsi.portal_head));
+
+ spdk_cpuset_free(cpumask_val);
+}
+
+static void
+parse_portal_ipv6_normal_case(void)
+{
+ const char *string = "[2001:ad6:1234::]:3260@1";
+ const char *host_str = "[2001:ad6:1234::]";
+ const char *port_str = "3260";
+ struct spdk_cpuset *cpumask_val;
+ struct spdk_iscsi_portal *p = NULL;
+ int rc;
+
+ cpumask_val = spdk_cpuset_alloc();
+ SPDK_CU_ASSERT_FATAL(cpumask_val != NULL);
+
+ spdk_cpuset_set_cpu(cpumask_val, 0, true);
+
+ rc = spdk_iscsi_parse_portal(string, &p, 0);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(p != NULL);
+ CU_ASSERT(strcmp(p->host, host_str) == 0);
+ CU_ASSERT(strcmp(p->port, port_str) == 0);
+ CU_ASSERT(spdk_cpuset_equal(p->cpumask, cpumask_val));
+
+ spdk_iscsi_portal_destroy(p);
+ CU_ASSERT(TAILQ_EMPTY(&g_spdk_iscsi.portal_head));
+
+ spdk_cpuset_free(cpumask_val);
+}
+
+static void
+parse_portal_ipv4_skip_cpumask_case(void)
+{
+ const char *string = "192.168.2.0:3260";
+ const char *host_str = "192.168.2.0";
+ const char *port_str = "3260";
+ struct spdk_cpuset *cpumask_val;
+ struct spdk_iscsi_portal *p = NULL;
+ int rc;
+
+ cpumask_val = spdk_app_get_core_mask();
+
+ rc = spdk_iscsi_parse_portal(string, &p, 0);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(p != NULL);
+ CU_ASSERT(strcmp(p->host, host_str) == 0);
+ CU_ASSERT(strcmp(p->port, port_str) == 0);
+ CU_ASSERT(spdk_cpuset_equal(p->cpumask, cpumask_val));
+
+ spdk_iscsi_portal_destroy(p);
+ CU_ASSERT(TAILQ_EMPTY(&g_spdk_iscsi.portal_head));
+}
+
+static void
+parse_portal_ipv6_skip_cpumask_case(void)
+{
+ const char *string = "[2001:ad6:1234::]:3260";
+ const char *host_str = "[2001:ad6:1234::]";
+ const char *port_str = "3260";
+ struct spdk_cpuset *cpumask_val;
+ struct spdk_iscsi_portal *p = NULL;
+ int rc;
+
+ cpumask_val = spdk_app_get_core_mask();
+
+ rc = spdk_iscsi_parse_portal(string, &p, 0);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(p != NULL);
+ CU_ASSERT(strcmp(p->host, host_str) == 0);
+ CU_ASSERT(strcmp(p->port, port_str) == 0);
+ CU_ASSERT(spdk_cpuset_equal(p->cpumask, cpumask_val));
+
+ spdk_iscsi_portal_destroy(p);
+ CU_ASSERT(TAILQ_EMPTY(&g_spdk_iscsi.portal_head));
+}
+
+static void
+parse_portal_ipv4_skip_port_and_cpumask_case(void)
+{
+ const char *string = "192.168.2.0";
+ const char *host_str = "192.168.2.0";
+ const char *port_str = "3260";
+ struct spdk_cpuset *cpumask_val;
+ struct spdk_iscsi_portal *p = NULL;
+ int rc;
+
+ cpumask_val = spdk_app_get_core_mask();
+
+ rc = spdk_iscsi_parse_portal(string, &p, 0);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(p != NULL);
+ CU_ASSERT(strcmp(p->host, host_str) == 0);
+ CU_ASSERT(strcmp(p->port, port_str) == 0);
+ CU_ASSERT(spdk_cpuset_equal(p->cpumask, cpumask_val));
+
+ spdk_iscsi_portal_destroy(p);
+ CU_ASSERT(TAILQ_EMPTY(&g_spdk_iscsi.portal_head));
+}
+
+static void
+parse_portal_ipv6_skip_port_and_cpumask_case(void)
+{
+ const char *string = "[2001:ad6:1234::]";
+ const char *host_str = "[2001:ad6:1234::]";
+ const char *port_str = "3260";
+ struct spdk_cpuset *cpumask_val;
+ struct spdk_iscsi_portal *p = NULL;
+ int rc;
+
+ cpumask_val = spdk_app_get_core_mask();
+
+ rc = spdk_iscsi_parse_portal(string, &p, 0);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(p != NULL);
+ CU_ASSERT(strcmp(p->host, host_str) == 0);
+ CU_ASSERT(strcmp(p->port, port_str) == 0);
+ CU_ASSERT(spdk_cpuset_equal(p->cpumask, cpumask_val));
+
+ spdk_iscsi_portal_destroy(p);
+ CU_ASSERT(TAILQ_EMPTY(&g_spdk_iscsi.portal_head));
+}
+
+static void
+portal_grp_register_unregister_case(void)
+{
+ struct spdk_iscsi_portal *p;
+ struct spdk_iscsi_portal_grp *pg1, *pg2;
+ int rc;
+ const char *host = "192.168.2.0";
+ const char *port = "3260";
+ const char *cpumask = "1";
+
+ pg1 = spdk_iscsi_portal_grp_create(1);
+ CU_ASSERT(pg1 != NULL);
+
+ p = spdk_iscsi_portal_create(host, port, cpumask);
+ CU_ASSERT(p != NULL);
+
+ spdk_iscsi_portal_grp_add_portal(pg1, p);
+
+ rc = spdk_iscsi_portal_grp_register(pg1);
+ CU_ASSERT(rc == 0);
+
+ pg2 = spdk_iscsi_portal_grp_unregister(1);
+ CU_ASSERT(pg2 != NULL);
+ CU_ASSERT(pg1 == pg2);
+
+ CU_ASSERT(TAILQ_EMPTY(&g_spdk_iscsi.pg_head));
+
+ spdk_iscsi_portal_grp_destroy(pg1);
+
+ CU_ASSERT(TAILQ_EMPTY(&g_spdk_iscsi.portal_head));
+}
+
+static void
+portal_grp_register_twice_case(void)
+{
+ struct spdk_iscsi_portal *p;
+ struct spdk_iscsi_portal_grp *pg1, *pg2;
+ int rc;
+ const char *host = "192.168.2.0";
+ const char *port = "3260";
+ const char *cpumask = "1";
+
+ pg1 = spdk_iscsi_portal_grp_create(1);
+ CU_ASSERT(pg1 != NULL);
+
+ p = spdk_iscsi_portal_create(host, port, cpumask);
+ CU_ASSERT(p != NULL);
+
+ spdk_iscsi_portal_grp_add_portal(pg1, p);
+
+ rc = spdk_iscsi_portal_grp_register(pg1);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_iscsi_portal_grp_register(pg1);
+ CU_ASSERT(rc != 0);
+
+ pg2 = spdk_iscsi_portal_grp_unregister(1);
+ CU_ASSERT(pg2 != NULL);
+ CU_ASSERT(pg1 == pg2);
+
+ CU_ASSERT(TAILQ_EMPTY(&g_spdk_iscsi.pg_head));
+
+ spdk_iscsi_portal_grp_destroy(pg1);
+
+ CU_ASSERT(TAILQ_EMPTY(&g_spdk_iscsi.portal_head));
+}
+
+static void
+portal_grp_add_delete_case(void)
+{
+ struct spdk_iscsi_portal_grp *pg1, *pg2;
+ struct spdk_iscsi_portal *p;
+ int rc;
+
+ const char *host = "192.168.2.0";
+ const char *port = "3260";
+ const char *cpumask = "1";
+
+ /* internal of add_portal_group */
+ pg1 = spdk_iscsi_portal_grp_create(1);
+ CU_ASSERT(pg1 != NULL);
+
+ p = spdk_iscsi_portal_create(host, port, cpumask);
+ CU_ASSERT(p != NULL);
+
+ spdk_iscsi_portal_grp_add_portal(pg1, p);
+
+ rc = spdk_iscsi_portal_grp_open(pg1);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_iscsi_portal_grp_register(pg1);
+ CU_ASSERT(rc == 0);
+
+ /* internal of delete_portal_group */
+ pg2 = spdk_iscsi_portal_grp_unregister(1);
+ CU_ASSERT(pg2 != NULL);
+ CU_ASSERT(pg1 == pg2);
+
+ spdk_iscsi_portal_grp_release(pg2);
+
+ CU_ASSERT(TAILQ_EMPTY(&g_spdk_iscsi.portal_head));
+ CU_ASSERT(TAILQ_EMPTY(&g_spdk_iscsi.pg_head));
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("portal_grp_suite", test_setup, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "portal create ipv4 normal case",
+ portal_create_ipv4_normal_case) == NULL
+ || CU_add_test(suite, "portal create ipv6 normal case",
+ portal_create_ipv6_normal_case) == NULL
+ || CU_add_test(suite, "portal create ipv4 wildcard case",
+ portal_create_ipv4_wildcard_case) == NULL
+ || CU_add_test(suite, "portal create ipv6 wildcard case",
+ portal_create_ipv6_wildcard_case) == NULL
+ || CU_add_test(suite, "portal create cpumask NULL case",
+ portal_create_cpumask_null_case) == NULL
+ || CU_add_test(suite, "portal create cpumask no bit on case",
+ portal_create_cpumask_no_bit_on_case) == NULL
+ || CU_add_test(suite, "portal create twice case",
+ portal_create_twice_case) == NULL
+ || CU_add_test(suite, "parse portal ipv4 normal case",
+ parse_portal_ipv4_normal_case) == NULL
+ || CU_add_test(suite, "parse portal ipv6 normal case",
+ parse_portal_ipv6_normal_case) == NULL
+ || CU_add_test(suite, "parse portal ipv4 skip cpumask case",
+ parse_portal_ipv4_skip_cpumask_case) == NULL
+ || CU_add_test(suite, "parse portal ipv6 skip cpumask case",
+ parse_portal_ipv6_skip_cpumask_case) == NULL
+ || CU_add_test(suite, "parse portal ipv4 skip port and cpumask case",
+ parse_portal_ipv4_skip_port_and_cpumask_case) == NULL
+ || CU_add_test(suite, "parse portal ipv6 skip port and cpumask case",
+ parse_portal_ipv6_skip_port_and_cpumask_case) == NULL
+ || CU_add_test(suite, "portal group register/unregister case",
+ portal_grp_register_unregister_case) == NULL
+ || CU_add_test(suite, "portal group register twice case",
+ portal_grp_register_twice_case) == NULL
+ || CU_add_test(suite, "portal group add/delete case",
+ portal_grp_add_delete_case) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/iscsi/tgt_node.c/.gitignore b/src/spdk/test/unit/lib/iscsi/tgt_node.c/.gitignore
new file mode 100644
index 00000000..010d84b8
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/tgt_node.c/.gitignore
@@ -0,0 +1 @@
+tgt_node_ut
diff --git a/src/spdk/test/unit/lib/iscsi/tgt_node.c/Makefile b/src/spdk/test/unit/lib/iscsi/tgt_node.c/Makefile
new file mode 100644
index 00000000..8cdf3ef3
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/tgt_node.c/Makefile
@@ -0,0 +1,41 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+
+SPDK_LIB_LIST = conf
+TEST_FILE = tgt_node_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/iscsi/tgt_node.c/tgt_node.conf b/src/spdk/test/unit/lib/iscsi/tgt_node.c/tgt_node.conf
new file mode 100644
index 00000000..6bf5aa66
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/tgt_node.c/tgt_node.conf
@@ -0,0 +1,95 @@
+[Global]
+
+# Test that parsing fails if there is no TargetName
+[Failure0]
+ TargetAlias "Data Disk1"
+ Mapping PortalGroup1 InitiatorGroup1
+ AuthMethod Auto
+ AuthGroup AuthGroup1
+ UseDigest Auto
+ QueueDepth 128
+ LUN0 Malloc0
+ LUN1 Malloc1
+
+# Test that parsing fails if there is no Mapping
+[Failure1]
+ TargetName target1
+ TargetAlias "Data Disk1"
+ AuthMethod Auto
+ AuthGroup AuthGroup1
+ UseDigest Auto
+ QueueDepth 128
+ LUN0 Malloc0
+ LUN1 Malloc1
+
+# Test that parsing fails if Mapping does not define Portal or InitiatorGroup
+[Failure2]
+ TargetName target1
+ TargetAlias "Data Disk1"
+ Mapping
+ AuthMethod Auto
+ AuthGroup AuthGroup1
+ UseDigest Auto
+ QueueDepth 128
+ LUN0 Malloc0
+ LUN1 Malloc1
+
+# Test that parsing fails if Mapping does not define InitiatorGroup
+[Failure3]
+ TargetName target1
+ TargetAlias "Data Disk1"
+ Mapping PortalGroup1
+ AuthMethod Auto
+ AuthGroup AuthGroup1
+ UseDigest Auto
+ QueueDepth 128
+ LUN0 Malloc0
+ LUN1 Malloc1
+
+# Test that parsing fails if Mapping switches PortalGroup/InitiatorGroup order
+[Failure4]
+ TargetName target1
+ TargetAlias "Data Disk1"
+ Mapping InitiatorGroup1 PortalGroup1
+ AuthMethod Auto
+ AuthGroup AuthGroup1
+ UseDigest Auto
+ QueueDepth 128
+ LUN0 Malloc0
+ LUN1 Malloc1
+
+# Test that parsing fails if Mapping uses invalid InitiatorGroup0
+[Failure5]
+ TargetName target1
+ TargetAlias "Data Disk1"
+ Mapping PortalGroup1 InitiatorGroup0
+ AuthMethod Auto
+ AuthGroup AuthGroup1
+ UseDigest Auto
+ QueueDepth 128
+ LUN0 Malloc0
+ LUN1 Malloc1
+
+# Test that parsing fails if Mapping uses invalid PortalGroup0
+[Failure6]
+ TargetName target1
+ TargetAlias "Data Disk1"
+ Mapping PortalGroup0 InitiatorGroup1
+ AuthMethod Auto
+ AuthGroup AuthGroup1
+ UseDigest Auto
+ QueueDepth 128
+ LUN0 Malloc0
+ LUN1 Malloc1
+
+# Test that parsing fails if AuthMethod is invalid
+[Failure7]
+ TargetName target1
+ TargetAlias "Data Disk1"
+ Mapping PortalGroup1 InitiatorGroup1
+ AuthMethod SomeGarbage
+ AuthGroup AuthGroup1
+ UseDigest Auto
+ QueueDepth 128
+ LUN0 Malloc0
+ LUN1 Malloc1
diff --git a/src/spdk/test/unit/lib/iscsi/tgt_node.c/tgt_node_ut.c b/src/spdk/test/unit/lib/iscsi/tgt_node.c/tgt_node_ut.c
new file mode 100644
index 00000000..eda02db6
--- /dev/null
+++ b/src/spdk/test/unit/lib/iscsi/tgt_node.c/tgt_node_ut.c
@@ -0,0 +1,886 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk/scsi.h"
+
+#include "CUnit/Basic.h"
+#include "spdk_internal/mock.h"
+
+#include "../common.c"
+#include "iscsi/tgt_node.c"
+#include "scsi/scsi_internal.h"
+#include "unit/lib/json_mock.c"
+
+struct spdk_iscsi_globals g_spdk_iscsi;
+
+const char *config_file;
+
+DEFINE_STUB(spdk_scsi_dev_get_id,
+ int,
+ (const struct spdk_scsi_dev *dev),
+ 0);
+
+DEFINE_STUB(spdk_scsi_lun_get_bdev_name,
+ const char *,
+ (const struct spdk_scsi_lun *lun),
+ NULL);
+
+DEFINE_STUB(spdk_scsi_lun_get_id,
+ int,
+ (const struct spdk_scsi_lun *lun),
+ 0);
+
+bool
+spdk_sock_is_ipv6(struct spdk_sock *sock)
+{
+ return false;
+}
+
+bool
+spdk_sock_is_ipv4(struct spdk_sock *sock)
+{
+ return false;
+}
+
+struct spdk_iscsi_portal_grp *
+spdk_iscsi_portal_grp_find_by_tag(int tag)
+{
+ return NULL;
+}
+
+struct spdk_iscsi_init_grp *
+spdk_iscsi_init_grp_find_by_tag(int tag)
+{
+ return NULL;
+}
+
+struct spdk_scsi_lun *
+spdk_scsi_dev_get_lun(struct spdk_scsi_dev *dev, int lun_id)
+{
+ if (lun_id < 0 || lun_id >= SPDK_SCSI_DEV_MAX_LUN) {
+ return NULL;
+ }
+
+ return dev->lun[lun_id];
+}
+
+int
+spdk_scsi_dev_add_lun(struct spdk_scsi_dev *dev, const char *bdev_name, int lun_id,
+ void (*hotremove_cb)(const struct spdk_scsi_lun *, void *),
+ void *hotremove_ctx)
+{
+ if (bdev_name == NULL) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+static void
+add_lun_test_cases(void)
+{
+ struct spdk_iscsi_tgt_node tgtnode;
+ int lun_id = 0;
+ char *bdev_name = NULL;
+ struct spdk_scsi_dev scsi_dev;
+ int rc;
+
+ memset(&tgtnode, 0, sizeof(struct spdk_iscsi_tgt_node));
+ memset(&scsi_dev, 0, sizeof(struct spdk_scsi_dev));
+
+ /* case 1 */
+ tgtnode.num_active_conns = 1;
+
+ rc = spdk_iscsi_tgt_node_add_lun(&tgtnode, bdev_name, lun_id);
+ CU_ASSERT(rc != 0);
+
+ /* case 2 */
+ tgtnode.num_active_conns = 0;
+ lun_id = -2;
+
+ rc = spdk_iscsi_tgt_node_add_lun(&tgtnode, bdev_name, lun_id);
+ CU_ASSERT(rc != 0);
+
+ /* case 3 */
+ lun_id = SPDK_SCSI_DEV_MAX_LUN;
+
+ rc = spdk_iscsi_tgt_node_add_lun(&tgtnode, bdev_name, lun_id);
+ CU_ASSERT(rc != 0);
+
+ /* case 4 */
+ lun_id = -1;
+ tgtnode.dev = NULL;
+
+ rc = spdk_iscsi_tgt_node_add_lun(&tgtnode, bdev_name, lun_id);
+ CU_ASSERT(rc != 0);
+
+ /* case 5 */
+ tgtnode.dev = &scsi_dev;
+
+ rc = spdk_iscsi_tgt_node_add_lun(&tgtnode, bdev_name, lun_id);
+ CU_ASSERT(rc != 0);
+
+ /* case 6 */
+ bdev_name = "LUN0";
+
+ rc = spdk_iscsi_tgt_node_add_lun(&tgtnode, bdev_name, lun_id);
+ CU_ASSERT(rc == 0);
+}
+
+static void
+config_file_fail_cases(void)
+{
+ struct spdk_conf *config;
+ struct spdk_conf_section *sp;
+ char section_name[64];
+ int section_index;
+ int rc;
+
+ config = spdk_conf_allocate();
+
+ rc = spdk_conf_read(config, config_file);
+ CU_ASSERT(rc == 0);
+
+ section_index = 0;
+ while (true) {
+ snprintf(section_name, sizeof(section_name), "Failure%d", section_index);
+ sp = spdk_conf_find_section(config, section_name);
+ if (sp == NULL) {
+ break;
+ }
+ rc = spdk_iscsi_parse_tgt_node(sp);
+ CU_ASSERT(rc < 0);
+ section_index++;
+ }
+
+ spdk_conf_free(config);
+}
+
+static void
+allow_any_allowed(void)
+{
+ bool result;
+ char *netmask;
+ char *addr1, *addr2;
+
+ netmask = "ANY";
+ addr1 = "2001:ad6:1234:5678:9abc::";
+ addr2 = "192.168.2.1";
+
+ result = spdk_iscsi_netmask_allow_addr(netmask, addr1);
+ CU_ASSERT(result == true);
+
+ result = spdk_iscsi_netmask_allow_addr(netmask, addr2);
+ CU_ASSERT(result == true);
+}
+
+static void
+allow_ipv6_allowed(void)
+{
+ bool result;
+ char *netmask;
+ char *addr;
+
+ netmask = "[2001:ad6:1234::]/48";
+ addr = "2001:ad6:1234:5678:9abc::";
+
+ result = spdk_iscsi_ipv6_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == true);
+
+ result = spdk_iscsi_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == true);
+
+ /* Netmask prefix bits == 128 (all bits must match) */
+ netmask = "[2001:ad6:1234:5678:9abc::1]/128";
+ addr = "2001:ad6:1234:5678:9abc::1";
+ result = spdk_iscsi_ipv6_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == true);
+}
+
+static void
+allow_ipv6_denied(void)
+{
+ bool result;
+ char *netmask;
+ char *addr;
+
+ netmask = "[2001:ad6:1234::]/56";
+ addr = "2001:ad6:1234:5678:9abc::";
+
+ result = spdk_iscsi_ipv6_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == false);
+
+ result = spdk_iscsi_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == false);
+
+ /* Netmask prefix bits == 128 (all bits must match) */
+ netmask = "[2001:ad6:1234:5678:9abc::1]/128";
+ addr = "2001:ad6:1234:5678:9abc::2";
+ result = spdk_iscsi_ipv6_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == false);
+}
+
+static void
+allow_ipv6_invalid(void)
+{
+ bool result;
+ char *netmask;
+ char *addr;
+
+ /* Netmask prefix bits > 128 */
+ netmask = "[2001:ad6:1234::]/129";
+ addr = "2001:ad6:1234:5678:9abc::";
+ result = spdk_iscsi_ipv6_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == false);
+
+ /* Netmask prefix bits == 0 */
+ netmask = "[2001:ad6:1234::]/0";
+ addr = "2001:ad6:1234:5678:9abc::";
+ result = spdk_iscsi_ipv6_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == false);
+
+ /* Netmask prefix bits < 0 */
+ netmask = "[2001:ad6:1234::]/-1";
+ addr = "2001:ad6:1234:5678:9abc::";
+ result = spdk_iscsi_ipv6_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == false);
+}
+
+static void
+allow_ipv4_allowed(void)
+{
+ bool result;
+ char *netmask;
+ char *addr;
+
+ netmask = "192.168.2.0/24";
+ addr = "192.168.2.1";
+
+ result = spdk_iscsi_ipv4_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == true);
+
+ result = spdk_iscsi_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == true);
+
+ /* Netmask prefix == 32 (all bits must match) */
+ netmask = "192.168.2.1/32";
+ addr = "192.168.2.1";
+ result = spdk_iscsi_ipv4_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == true);
+}
+
+static void
+allow_ipv4_denied(void)
+{
+ bool result;
+ char *netmask;
+ char *addr;
+
+ netmask = "192.168.2.0";
+ addr = "192.168.2.1";
+
+ result = spdk_iscsi_ipv4_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == false);
+
+ result = spdk_iscsi_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == false);
+
+ /* Netmask prefix == 32 (all bits must match) */
+ netmask = "192.168.2.1/32";
+ addr = "192.168.2.2";
+ result = spdk_iscsi_ipv4_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == false);
+}
+
+static void
+allow_ipv4_invalid(void)
+{
+ bool result;
+ char *netmask;
+ char *addr;
+
+ /* Netmask prefix bits > 32 */
+ netmask = "192.168.2.0/33";
+ addr = "192.168.2.1";
+ result = spdk_iscsi_ipv4_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == false);
+
+ /* Netmask prefix bits == 0 */
+ netmask = "192.168.2.0/0";
+ addr = "192.168.2.1";
+ result = spdk_iscsi_ipv4_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == false);
+
+ /* Netmask prefix bits < 0 */
+ netmask = "192.168.2.0/-1";
+ addr = "192.168.2.1";
+ result = spdk_iscsi_ipv4_netmask_allow_addr(netmask, addr);
+ CU_ASSERT(result == false);
+}
+
+static void
+node_access_allowed(void)
+{
+ struct spdk_iscsi_tgt_node tgtnode;
+ struct spdk_iscsi_portal_grp pg;
+ struct spdk_iscsi_init_grp ig;
+ struct spdk_iscsi_conn conn;
+ struct spdk_iscsi_portal portal;
+ struct spdk_iscsi_initiator_name iname;
+ struct spdk_iscsi_initiator_netmask imask;
+ struct spdk_scsi_dev scsi_dev;
+ struct spdk_iscsi_pg_map *pg_map;
+ char *iqn, *addr;
+ bool result;
+
+ /* portal group initialization */
+ memset(&pg, 0, sizeof(struct spdk_iscsi_portal_grp));
+ pg.tag = 1;
+
+ /* initiator group initialization */
+ memset(&ig, 0, sizeof(struct spdk_iscsi_init_grp));
+ ig.tag = 1;
+
+ ig.ninitiators = 1;
+ iname.name = "iqn.2017-10.spdk.io:0001";
+ TAILQ_INIT(&ig.initiator_head);
+ TAILQ_INSERT_TAIL(&ig.initiator_head, &iname, tailq);
+
+ ig.nnetmasks = 1;
+ imask.mask = "192.168.2.0/24";
+ TAILQ_INIT(&ig.netmask_head);
+ TAILQ_INSERT_TAIL(&ig.netmask_head, &imask, tailq);
+
+ /* target initialization */
+ memset(&tgtnode, 0, sizeof(struct spdk_iscsi_tgt_node));
+ tgtnode.name = "iqn.2017-10.spdk.io:0001";
+ TAILQ_INIT(&tgtnode.pg_map_head);
+
+ memset(&scsi_dev, 0, sizeof(struct spdk_scsi_dev));
+ snprintf(scsi_dev.name, sizeof(scsi_dev.name), "iqn.2017-10.spdk.io:0001");
+ tgtnode.dev = &scsi_dev;
+
+ pg_map = spdk_iscsi_tgt_node_add_pg_map(&tgtnode, &pg);
+ spdk_iscsi_pg_map_add_ig_map(pg_map, &ig);
+
+ /* portal initialization */
+ memset(&portal, 0, sizeof(struct spdk_iscsi_portal));
+ portal.group = &pg;
+ portal.host = "192.168.2.0";
+ portal.port = "3260";
+
+ /* input for UT */
+ memset(&conn, 0, sizeof(struct spdk_iscsi_conn));
+ conn.portal = &portal;
+
+ iqn = "iqn.2017-10.spdk.io:0001";
+ addr = "192.168.2.1";
+
+ result = spdk_iscsi_tgt_node_access(&conn, &tgtnode, iqn, addr);
+ CU_ASSERT(result == true);
+
+ spdk_iscsi_pg_map_delete_ig_map(pg_map, &ig);
+ spdk_iscsi_tgt_node_delete_pg_map(&tgtnode, &pg);
+}
+
+static void
+node_access_denied_by_empty_netmask(void)
+{
+ struct spdk_iscsi_tgt_node tgtnode;
+ struct spdk_iscsi_portal_grp pg;
+ struct spdk_iscsi_init_grp ig;
+ struct spdk_iscsi_conn conn;
+ struct spdk_iscsi_portal portal;
+ struct spdk_iscsi_initiator_name iname;
+ struct spdk_scsi_dev scsi_dev;
+ struct spdk_iscsi_pg_map *pg_map;
+ char *iqn, *addr;
+ bool result;
+
+ /* portal group initialization */
+ memset(&pg, 0, sizeof(struct spdk_iscsi_portal_grp));
+ pg.tag = 1;
+
+ /* initiator group initialization */
+ memset(&ig, 0, sizeof(struct spdk_iscsi_init_grp));
+ ig.tag = 1;
+
+ ig.ninitiators = 1;
+ iname.name = "iqn.2017-10.spdk.io:0001";
+ TAILQ_INIT(&ig.initiator_head);
+ TAILQ_INSERT_TAIL(&ig.initiator_head, &iname, tailq);
+
+ ig.nnetmasks = 0;
+ TAILQ_INIT(&ig.netmask_head);
+
+ /* target initialization */
+ memset(&tgtnode, 0, sizeof(struct spdk_iscsi_tgt_node));
+ tgtnode.name = "iqn.2017-10.spdk.io:0001";
+ TAILQ_INIT(&tgtnode.pg_map_head);
+
+ memset(&scsi_dev, 0, sizeof(struct spdk_scsi_dev));
+ snprintf(scsi_dev.name, sizeof(scsi_dev.name), "iqn.2017-10.spdk.io:0001");
+ tgtnode.dev = &scsi_dev;
+
+ pg_map = spdk_iscsi_tgt_node_add_pg_map(&tgtnode, &pg);
+ spdk_iscsi_pg_map_add_ig_map(pg_map, &ig);
+
+ /* portal initialization */
+ memset(&portal, 0, sizeof(struct spdk_iscsi_portal));
+ portal.group = &pg;
+ portal.host = "192.168.2.0";
+ portal.port = "3260";
+
+ /* input for UT */
+ memset(&conn, 0, sizeof(struct spdk_iscsi_conn));
+ conn.portal = &portal;
+
+ iqn = "iqn.2017-10.spdk.io:0001";
+ addr = "192.168.3.1";
+
+ result = spdk_iscsi_tgt_node_access(&conn, &tgtnode, iqn, addr);
+ CU_ASSERT(result == false);
+
+ spdk_iscsi_pg_map_delete_ig_map(pg_map, &ig);
+ spdk_iscsi_tgt_node_delete_pg_map(&tgtnode, &pg);
+}
+
+#define IQN1 "iqn.2017-11.spdk.io:0001"
+#define NO_IQN1 "!iqn.2017-11.spdk.io:0001"
+#define IQN2 "iqn.2017-11.spdk.io:0002"
+#define IP1 "192.168.2.0"
+#define IP2 "192.168.2.1"
+
+static void
+node_access_multi_initiator_groups_cases(void)
+{
+ struct spdk_iscsi_tgt_node tgtnode;
+ struct spdk_iscsi_conn conn;
+ struct spdk_iscsi_portal_grp pg;
+ struct spdk_iscsi_portal portal;
+ struct spdk_iscsi_init_grp ig1, ig2;
+ struct spdk_iscsi_initiator_name iname1, iname2;
+ struct spdk_iscsi_initiator_netmask imask1, imask2;
+ struct spdk_scsi_dev scsi_dev;
+ struct spdk_iscsi_pg_map *pg_map;
+ char *iqn, *addr;
+ bool result;
+
+ /* target initialization */
+ memset(&tgtnode, 0, sizeof(struct spdk_iscsi_tgt_node));
+ tgtnode.name = IQN1;
+ TAILQ_INIT(&tgtnode.pg_map_head);
+
+ memset(&scsi_dev, 0, sizeof(struct spdk_scsi_dev));
+ snprintf(scsi_dev.name, sizeof(scsi_dev.name), IQN1);
+ tgtnode.dev = &scsi_dev;
+
+ /* initiator group initialization */
+ memset(&ig1, 0, sizeof(struct spdk_iscsi_init_grp));
+ ig1.tag = 1;
+ TAILQ_INIT(&ig1.initiator_head);
+ TAILQ_INIT(&ig1.netmask_head);
+
+ ig1.ninitiators = 1;
+ iname1.name = NULL;
+ TAILQ_INSERT_TAIL(&ig1.initiator_head, &iname1, tailq);
+
+ ig1.nnetmasks = 1;
+ imask1.mask = NULL;
+ TAILQ_INSERT_TAIL(&ig1.netmask_head, &imask1, tailq);
+
+ memset(&ig2, 0, sizeof(struct spdk_iscsi_init_grp));
+ ig2.tag = 2;
+ TAILQ_INIT(&ig2.initiator_head);
+ TAILQ_INIT(&ig2.netmask_head);
+
+ ig2.ninitiators = 1;
+ iname2.name = NULL;
+ TAILQ_INSERT_TAIL(&ig2.initiator_head, &iname2, tailq);
+
+ ig2.nnetmasks = 1;
+ imask2.mask = NULL;
+ TAILQ_INSERT_TAIL(&ig2.netmask_head, &imask2, tailq);
+
+ /* portal group initialization */
+ memset(&pg, 0, sizeof(struct spdk_iscsi_portal_grp));
+ pg.tag = 1;
+
+ pg_map = spdk_iscsi_tgt_node_add_pg_map(&tgtnode, &pg);
+ spdk_iscsi_pg_map_add_ig_map(pg_map, &ig1);
+ spdk_iscsi_pg_map_add_ig_map(pg_map, &ig2);
+
+ /* portal initialization */
+ memset(&portal, 0, sizeof(struct spdk_iscsi_portal));
+ portal.group = &pg;
+ portal.host = IP1;
+ portal.port = "3260";
+
+ /* connection initialization */
+ memset(&conn, 0, sizeof(struct spdk_iscsi_conn));
+ conn.portal = &portal;
+
+ iqn = IQN1;
+ addr = IP1;
+
+ /*
+ * case 1:
+ * +-------------------------------------------+---------+
+ * | IG1 | IG2 | |
+ * +-------------------------------------------+ |
+ * | name | addr | name | addr | result |
+ * +-------------------------------------------+---------+
+ * +-------------------------------------------+---------+
+ * | denied | - | - | - | denied |
+ * +-------------------------------------------+---------+
+ */
+ iname1.name = NO_IQN1;
+
+ result = spdk_iscsi_tgt_node_access(&conn, &tgtnode, iqn, addr);
+ CU_ASSERT(result == false);
+
+ /*
+ * case 2:
+ * +-------------------------------------------+---------+
+ * | IG1 | IG2 | |
+ * +-------------------------------------------+ |
+ * | name | addr | name | addr | result |
+ * +-------------------------------------------+---------+
+ * +-------------------------------------------+---------+
+ * | allowed | allowed | - | - | allowed |
+ * +-------------------------------------------+---------+
+ */
+ iname1.name = IQN1;
+ imask1.mask = IP1;
+
+ result = spdk_iscsi_tgt_node_access(&conn, &tgtnode, iqn, addr);
+ CU_ASSERT(result == true);
+
+ /*
+ * case 3:
+ * +-------------------------------------------+---------+
+ * | IG1 | IG2 | |
+ * +-------------------------------------------+ |
+ * | name | addr | name | addr | result |
+ * +-------------------------------------------+---------+
+ * +-------------------------------------------+---------+
+ * | allowed | denied | denied | - | denied |
+ * +-------------------------------------------+---------+
+ */
+ iname1.name = IQN1;
+ imask1.mask = IP2;
+ iname2.name = NO_IQN1;
+
+ result = spdk_iscsi_tgt_node_access(&conn, &tgtnode, iqn, addr);
+ CU_ASSERT(result == false);
+
+ /*
+ * case 4:
+ * +-------------------------------------------+---------+
+ * | IG1 | IG2 | |
+ * +-------------------------------------------+ |
+ * | name | addr | name | addr | result |
+ * +-------------------------------------------+---------+
+ * +-------------------------------------------+---------+
+ * | allowed | denied | allowed | allowed | allowed |
+ * +-------------------------------------------+---------+
+ */
+ iname1.name = IQN1;
+ imask1.mask = IP2;
+ iname2.name = IQN1;
+ imask2.mask = IP1;
+
+ result = spdk_iscsi_tgt_node_access(&conn, &tgtnode, iqn, addr);
+ CU_ASSERT(result == true);
+
+ /*
+ * case 5:
+ * +---------------------------------------------+---------+
+ * | IG1 | IG2 | |
+ * +---------------------------------------------+ |
+ * | name | addr | name | addr | result |
+ * +---------------------------------------------+---------+
+ * +---------------------------------------------+---------+
+ * | allowed | denied | allowed | denied | denied |
+ * +---------------------------------------------+---------+
+ */
+ iname1.name = IQN1;
+ imask1.mask = IP2;
+ iname2.name = IQN1;
+ imask2.mask = IP2;
+
+ result = spdk_iscsi_tgt_node_access(&conn, &tgtnode, iqn, addr);
+ CU_ASSERT(result == false);
+
+ /*
+ * case 6:
+ * +---------------------------------------------+---------+
+ * | IG1 | IG2 | |
+ * +---------------------------------------------+ |
+ * | name | addr | name | addr | result |
+ * +---------------------------------------------+---------+
+ * +---------------------------------------------+---------+
+ * | allowed | denied | not found | - | denied |
+ * +---------------------------------------------+---------+
+ */
+ iname1.name = IQN1;
+ imask1.mask = IP2;
+ iname2.name = IQN2;
+
+ result = spdk_iscsi_tgt_node_access(&conn, &tgtnode, iqn, addr);
+ CU_ASSERT(result == false);
+
+ /*
+ * case 7:
+ * +---------------------------------------------+---------+
+ * | IG1 | IG2 | |
+ * +---------------------------------------------+ |
+ * | name | addr | name | addr | result |
+ * +---------------------------------------------+---------+
+ * +---------------------------------------------+---------+
+ * | not found | - | denied | - | denied |
+ * +---------------------------------------------+---------+
+ */
+ iname1.name = IQN2;
+ iname2.name = NO_IQN1;
+
+ result = spdk_iscsi_tgt_node_access(&conn, &tgtnode, iqn, addr);
+ CU_ASSERT(result == false);
+
+ /*
+ * case 8:
+ * +---------------------------------------------+---------+
+ * | IG1 | IG2 | |
+ * +---------------------------------------------+ |
+ * | name | addr | name | addr | result |
+ * +---------------------------------------------+---------+
+ * +---------------------------------------------+---------+
+ * | not found | - | allowed | allowed | allowed |
+ * +---------------------------------------------+---------+
+ */
+ iname1.name = IQN2;
+ iname2.name = IQN1;
+ imask2.mask = IP1;
+
+ result = spdk_iscsi_tgt_node_access(&conn, &tgtnode, iqn, addr);
+ CU_ASSERT(result == true);
+
+ /*
+ * case 9:
+ * +---------------------------------------------+---------+
+ * | IG1 | IG2 | |
+ * +---------------------------------------------+ |
+ * | name | addr | name | addr | result |
+ * +---------------------------------------------+---------+
+ * +---------------------------------------------+---------+
+ * | not found | - | allowed | denied | denied |
+ * +---------------------------------------------+---------+
+ */
+ iname1.name = IQN2;
+ iname2.name = IQN1;
+ imask2.mask = IP2;
+
+ result = spdk_iscsi_tgt_node_access(&conn, &tgtnode, iqn, addr);
+ CU_ASSERT(result == false);
+
+ /*
+ * case 10:
+ * +---------------------------------------------+---------+
+ * | IG1 | IG2 | |
+ * +---------------------------------------------+ |
+ * | name | addr | name | addr | result |
+ * +---------------------------------------------+---------+
+ * +---------------------------------------------+---------+
+ * | not found | - | not found | - | denied |
+ * +---------------------------------------------+---------+
+ */
+ iname1.name = IQN2;
+ iname2.name = IQN2;
+
+ result = spdk_iscsi_tgt_node_access(&conn, &tgtnode, iqn, addr);
+ CU_ASSERT(result == false);
+
+ spdk_iscsi_pg_map_delete_ig_map(pg_map, &ig1);
+ spdk_iscsi_pg_map_delete_ig_map(pg_map, &ig2);
+ spdk_iscsi_tgt_node_delete_pg_map(&tgtnode, &pg);
+}
+
+static void
+allow_iscsi_name_multi_maps_case(void)
+{
+ struct spdk_iscsi_tgt_node tgtnode;
+ struct spdk_iscsi_portal_grp pg1, pg2;
+ struct spdk_iscsi_init_grp ig;
+ struct spdk_iscsi_initiator_name iname;
+ struct spdk_iscsi_pg_map *pg_map1, *pg_map2;
+ struct spdk_scsi_dev scsi_dev;
+ char *iqn;
+ bool result;
+
+ /* target initialization */
+ memset(&tgtnode, 0, sizeof(struct spdk_iscsi_tgt_node));
+ TAILQ_INIT(&tgtnode.pg_map_head);
+
+ memset(&scsi_dev, 0, sizeof(struct spdk_scsi_dev));
+ snprintf(scsi_dev.name, sizeof(scsi_dev.name), IQN1);
+ tgtnode.dev = &scsi_dev;
+
+ /* initiator group initialization */
+ memset(&ig, 0, sizeof(struct spdk_iscsi_init_grp));
+ TAILQ_INIT(&ig.initiator_head);
+
+ ig.ninitiators = 1;
+ iname.name = NULL;
+ TAILQ_INSERT_TAIL(&ig.initiator_head, &iname, tailq);
+
+ /* portal group initialization */
+ memset(&pg1, 0, sizeof(struct spdk_iscsi_portal_grp));
+ pg1.tag = 1;
+ memset(&pg2, 0, sizeof(struct spdk_iscsi_portal_grp));
+ pg2.tag = 1;
+
+ pg_map1 = spdk_iscsi_tgt_node_add_pg_map(&tgtnode, &pg1);
+ pg_map2 = spdk_iscsi_tgt_node_add_pg_map(&tgtnode, &pg2);
+ spdk_iscsi_pg_map_add_ig_map(pg_map1, &ig);
+ spdk_iscsi_pg_map_add_ig_map(pg_map2, &ig);
+
+ /* test for IG1 <-> PG1, PG2 case */
+ iqn = IQN1;
+
+ iname.name = IQN1;
+
+ result = spdk_iscsi_tgt_node_allow_iscsi_name(&tgtnode, iqn);
+ CU_ASSERT(result == true);
+
+ iname.name = IQN2;
+
+ result = spdk_iscsi_tgt_node_allow_iscsi_name(&tgtnode, iqn);
+ CU_ASSERT(result == false);
+
+ spdk_iscsi_pg_map_delete_ig_map(pg_map1, &ig);
+ spdk_iscsi_pg_map_delete_ig_map(pg_map2, &ig);
+ spdk_iscsi_tgt_node_delete_pg_map(&tgtnode, &pg1);
+ spdk_iscsi_tgt_node_delete_pg_map(&tgtnode, &pg2);
+}
+
+/*
+ * static bool
+ * spdk_iscsi_check_chap_params(bool disable_chap, bool require_chap,
+ * bool mutual_chap, int chap_group);
+ */
+static void
+chap_param_test_cases(void)
+{
+ /* Auto */
+ CU_ASSERT(spdk_iscsi_check_chap_params(false, false, false, 0) == true);
+
+ /* None */
+ CU_ASSERT(spdk_iscsi_check_chap_params(true, false, false, 0) == true);
+
+ /* CHAP */
+ CU_ASSERT(spdk_iscsi_check_chap_params(false, true, false, 0) == true);
+
+ /* CHAP Mutual */
+ CU_ASSERT(spdk_iscsi_check_chap_params(false, true, true, 0) == true);
+
+ /* Check mutual exclusiveness of disabled and required */
+ CU_ASSERT(spdk_iscsi_check_chap_params(true, true, false, 0) == false);
+
+ /* Mutual requires Required */
+ CU_ASSERT(spdk_iscsi_check_chap_params(false, false, true, 0) == false);
+
+ /* Remaining combinations */
+ CU_ASSERT(spdk_iscsi_check_chap_params(true, false, true, 0) == false);
+ CU_ASSERT(spdk_iscsi_check_chap_params(true, true, true, 0) == false);
+
+ /* Valid auth group ID */
+ CU_ASSERT(spdk_iscsi_check_chap_params(false, false, false, 1) == true);
+
+ /* Invalid auth group ID */
+ CU_ASSERT(spdk_iscsi_check_chap_params(false, false, false, -1) == false);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (argc < 2) {
+ fprintf(stderr, "usage: %s <config file>\n", argv[0]);
+ exit(1);
+ }
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ config_file = argv[1];
+
+ suite = CU_add_suite("iscsi_target_node_suite", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "add lun test cases", add_lun_test_cases) == NULL
+ || CU_add_test(suite, "config file fail cases", config_file_fail_cases) == NULL
+ || CU_add_test(suite, "allow any allowed case", allow_any_allowed) == NULL
+ || CU_add_test(suite, "allow ipv6 allowed case", allow_ipv6_allowed) == NULL
+ || CU_add_test(suite, "allow ipv6 denied case", allow_ipv6_denied) == NULL
+ || CU_add_test(suite, "allow ipv6 invalid case", allow_ipv6_invalid) == NULL
+ || CU_add_test(suite, "allow ipv4 allowed case", allow_ipv4_allowed) == NULL
+ || CU_add_test(suite, "allow ipv4 denied case", allow_ipv4_denied) == NULL
+ || CU_add_test(suite, "allow ipv4 invalid case", allow_ipv4_invalid) == NULL
+ || CU_add_test(suite, "node access allowed case", node_access_allowed) == NULL
+ || CU_add_test(suite, "node access denied case (empty netmask)",
+ node_access_denied_by_empty_netmask) == NULL
+ || CU_add_test(suite, "node access multiple initiator groups cases",
+ node_access_multi_initiator_groups_cases) == NULL
+ || CU_add_test(suite, "allow iscsi name case",
+ allow_iscsi_name_multi_maps_case) == NULL
+ || CU_add_test(suite, "chap param test cases", chap_param_test_cases) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/json/Makefile b/src/spdk/test/unit/lib/json/Makefile
new file mode 100644
index 00000000..db38f27d
--- /dev/null
+++ b/src/spdk/test/unit/lib/json/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = json_parse.c json_util.c json_write.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/json/json_parse.c/.gitignore b/src/spdk/test/unit/lib/json/json_parse.c/.gitignore
new file mode 100644
index 00000000..2b4445fd
--- /dev/null
+++ b/src/spdk/test/unit/lib/json/json_parse.c/.gitignore
@@ -0,0 +1 @@
+json_parse_ut
diff --git a/src/spdk/test/unit/lib/json/json_parse.c/Makefile b/src/spdk/test/unit/lib/json/json_parse.c/Makefile
new file mode 100644
index 00000000..3d410024
--- /dev/null
+++ b/src/spdk/test/unit/lib/json/json_parse.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = json_parse_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/json/json_parse.c/json_parse_ut.c b/src/spdk/test/unit/lib/json/json_parse.c/json_parse_ut.c
new file mode 100644
index 00000000..dae80476
--- /dev/null
+++ b/src/spdk/test/unit/lib/json/json_parse.c/json_parse_ut.c
@@ -0,0 +1,940 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "json/json_parse.c"
+
+static uint8_t g_buf[1000];
+static void *g_end;
+static struct spdk_json_val g_vals[100];
+static int g_cur_val;
+
+/* Fill buf with raw data */
+#define BUF_SETUP(in) \
+ memset(g_buf, 0, sizeof(g_buf)); \
+ if (sizeof(in) > 1) { \
+ memcpy(g_buf, in, sizeof(in) - 1); \
+ } \
+ g_end = NULL
+
+/*
+ * Do two checks - first pass NULL for values to ensure the count is correct,
+ * then pass g_vals to get the actual values.
+ */
+#define PARSE_PASS_FLAGS(in, num_vals, trailing, flags) \
+ BUF_SETUP(in); \
+ CU_ASSERT(spdk_json_parse(g_buf, sizeof(in) - 1, NULL, 0, &g_end, flags) == num_vals); \
+ memset(g_vals, 0, sizeof(g_vals)); \
+ CU_ASSERT(spdk_json_parse(g_buf, sizeof(in) - 1, g_vals, sizeof(g_vals), &g_end, flags | SPDK_JSON_PARSE_FLAG_DECODE_IN_PLACE) == num_vals); \
+ CU_ASSERT(g_end == g_buf + sizeof(in) - sizeof(trailing)); \
+ CU_ASSERT(memcmp(g_end, trailing, sizeof(trailing) - 1) == 0); \
+ g_cur_val = 0
+
+#define PARSE_PASS(in, num_vals, trailing) \
+ PARSE_PASS_FLAGS(in, num_vals, trailing, 0)
+
+#define PARSE_FAIL_FLAGS(in, retval, flags) \
+ BUF_SETUP(in); \
+ CU_ASSERT(spdk_json_parse(g_buf, sizeof(in) - 1, NULL, 0, &g_end, flags) == retval)
+
+#define PARSE_FAIL(in, retval) \
+ PARSE_FAIL_FLAGS(in, retval, 0)
+
+#define VAL_STRING_MATCH(str, var_type) \
+ CU_ASSERT(g_vals[g_cur_val].type == var_type); \
+ CU_ASSERT(g_vals[g_cur_val].len == sizeof(str) - 1); \
+ if (g_vals[g_cur_val].len == sizeof(str) - 1 && sizeof(str) > 1) { \
+ CU_ASSERT(memcmp(g_vals[g_cur_val].start, str, g_vals[g_cur_val].len) == 0); \
+ } \
+ g_cur_val++
+
+#define VAL_STRING(str) VAL_STRING_MATCH(str, SPDK_JSON_VAL_STRING)
+#define VAL_NAME(str) VAL_STRING_MATCH(str, SPDK_JSON_VAL_NAME)
+#define VAL_NUMBER(num) VAL_STRING_MATCH(num, SPDK_JSON_VAL_NUMBER)
+
+#define VAL_LITERAL(str, val_type) \
+ CU_ASSERT(g_vals[g_cur_val].type == val_type); \
+ CU_ASSERT(g_vals[g_cur_val].len == strlen(str)); \
+ if (g_vals[g_cur_val].len == strlen(str)) { \
+ CU_ASSERT(memcmp(g_vals[g_cur_val].start, str, g_vals[g_cur_val].len) == 0); \
+ } \
+ g_cur_val++
+
+#define VAL_TRUE() VAL_LITERAL("true", SPDK_JSON_VAL_TRUE)
+#define VAL_FALSE() VAL_LITERAL("false", SPDK_JSON_VAL_FALSE)
+#define VAL_NULL() VAL_LITERAL("null", SPDK_JSON_VAL_NULL)
+
+#define VAL_ARRAY_BEGIN(count) \
+ CU_ASSERT(g_vals[g_cur_val].type == SPDK_JSON_VAL_ARRAY_BEGIN); \
+ CU_ASSERT(g_vals[g_cur_val].len == count); \
+ g_cur_val++
+
+#define VAL_ARRAY_END() \
+ CU_ASSERT(g_vals[g_cur_val].type == SPDK_JSON_VAL_ARRAY_END); \
+ g_cur_val++
+
+#define VAL_OBJECT_BEGIN(count) \
+ CU_ASSERT(g_vals[g_cur_val].type == SPDK_JSON_VAL_OBJECT_BEGIN); \
+ CU_ASSERT(g_vals[g_cur_val].len == count); \
+ g_cur_val++
+
+#define VAL_OBJECT_END() \
+ CU_ASSERT(g_vals[g_cur_val].type == SPDK_JSON_VAL_OBJECT_END); \
+ g_cur_val++
+
+/* Simplified macros for string-only testing */
+#define STR_PASS(in, out) \
+ PARSE_PASS("\"" in "\"", 1, ""); \
+ VAL_STRING(out)
+
+#define STR_FAIL(in, retval) \
+ PARSE_FAIL("\"" in "\"", retval)
+
+/* Simplified macros for number-only testing (no whitespace allowed) */
+#define NUM_PASS(in) \
+ PARSE_PASS(in, 1, ""); \
+ VAL_NUMBER(in)
+
+#define NUM_FAIL(in, retval) \
+ PARSE_FAIL(in, retval)
+
+static void
+test_parse_literal(void)
+{
+ PARSE_PASS("true", 1, "");
+ VAL_TRUE();
+
+ PARSE_PASS(" true ", 1, "");
+ VAL_TRUE();
+
+ PARSE_PASS("false", 1, "");
+ VAL_FALSE();
+
+ PARSE_PASS("null", 1, "");
+ VAL_NULL();
+
+ PARSE_PASS("trueaaa", 1, "aaa");
+ VAL_TRUE();
+
+ PARSE_PASS("truefalse", 1, "false");
+ VAL_TRUE();
+
+ PARSE_PASS("true false", 1, "false");
+ VAL_TRUE();
+
+ PARSE_PASS("true,false", 1, ",false");
+ VAL_TRUE();
+
+ PARSE_PASS("true,", 1, ",");
+ VAL_TRUE();
+
+ PARSE_FAIL("True", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("abcdef", SPDK_JSON_PARSE_INVALID);
+
+ PARSE_FAIL("t", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("tru", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("f", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("fals", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("n", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("nul", SPDK_JSON_PARSE_INCOMPLETE);
+
+ PARSE_FAIL("taaaaa", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("faaaaa", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("naaaaa", SPDK_JSON_PARSE_INVALID);
+}
+
+static void
+test_parse_string_simple(void)
+{
+ PARSE_PASS("\"\"", 1, "");
+ VAL_STRING("");
+
+ PARSE_PASS("\"hello world\"", 1, "");
+ VAL_STRING("hello world");
+
+ PARSE_PASS(" \"hello world\" ", 1, "");
+ VAL_STRING("hello world");
+
+ /* Unterminated string */
+ PARSE_FAIL("\"hello world", SPDK_JSON_PARSE_INCOMPLETE);
+
+ /* Trailing comma */
+ PARSE_PASS("\"hello world\",", 1, ",");
+ VAL_STRING("hello world");
+}
+
+static void
+test_parse_string_control_chars(void)
+{
+ /* U+0000 through U+001F must be escaped */
+ STR_FAIL("\x00", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x01", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x02", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x03", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x04", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x05", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x06", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x07", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x08", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x09", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x0A", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x0B", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x0C", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x0D", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x0E", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x0F", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x10", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x11", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x12", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x13", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x14", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x15", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x16", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x17", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x18", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x19", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x1A", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x1B", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x1C", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x1D", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x1E", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\x1F", SPDK_JSON_PARSE_INVALID);
+ STR_PASS(" ", " "); /* \x20 (first valid unescaped char) */
+
+ /* Test control chars in the middle of a string */
+ STR_FAIL("abc\ndef", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("abc\tdef", SPDK_JSON_PARSE_INVALID);
+}
+
+static void
+test_parse_string_utf8(void)
+{
+ /* Valid one-, two-, three-, and four-byte sequences */
+ STR_PASS("\x41", "A");
+ STR_PASS("\xC3\xB6", "\xC3\xB6");
+ STR_PASS("\xE2\x88\x9A", "\xE2\x88\x9A");
+ STR_PASS("\xF0\xA0\x9C\x8E", "\xF0\xA0\x9C\x8E");
+
+ /* Examples from RFC 3629 */
+ STR_PASS("\x41\xE2\x89\xA2\xCE\x91\x2E", "\x41\xE2\x89\xA2\xCE\x91\x2E");
+ STR_PASS("\xED\x95\x9C\xEA\xB5\xAD\xEC\x96\xB4", "\xED\x95\x9C\xEA\xB5\xAD\xEC\x96\xB4");
+ STR_PASS("\xE6\x97\xA5\xE6\x9C\xAC\xE8\xAA\x9E", "\xE6\x97\xA5\xE6\x9C\xAC\xE8\xAA\x9E");
+ STR_PASS("\xEF\xBB\xBF\xF0\xA3\x8E\xB4", "\xEF\xBB\xBF\xF0\xA3\x8E\xB4");
+
+ /* Edge cases */
+ STR_PASS("\x7F", "\x7F");
+ STR_FAIL("\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xC1", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xC2", SPDK_JSON_PARSE_INVALID);
+ STR_PASS("\xC2\x80", "\xC2\x80");
+ STR_PASS("\xC2\xBF", "\xC2\xBF");
+ STR_PASS("\xDF\x80", "\xDF\x80");
+ STR_PASS("\xDF\xBF", "\xDF\xBF");
+ STR_FAIL("\xDF", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xE0\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xE0\x1F", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xE0\x1F\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xE0", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xE0\xA0", SPDK_JSON_PARSE_INVALID);
+ STR_PASS("\xE0\xA0\x80", "\xE0\xA0\x80");
+ STR_PASS("\xE0\xA0\xBF", "\xE0\xA0\xBF");
+ STR_FAIL("\xE0\xA0\xC0", SPDK_JSON_PARSE_INVALID);
+ STR_PASS("\xE0\xBF\x80", "\xE0\xBF\x80");
+ STR_PASS("\xE0\xBF\xBF", "\xE0\xBF\xBF");
+ STR_FAIL("\xE0\xC0\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xE1", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xE1\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xE1\x7F\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xE1\x80\x7F", SPDK_JSON_PARSE_INVALID);
+ STR_PASS("\xE1\x80\x80", "\xE1\x80\x80");
+ STR_PASS("\xE1\x80\xBF", "\xE1\x80\xBF");
+ STR_PASS("\xE1\xBF\x80", "\xE1\xBF\x80");
+ STR_PASS("\xE1\xBF\xBF", "\xE1\xBF\xBF");
+ STR_FAIL("\xE1\xC0\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xE1\x80\xC0", SPDK_JSON_PARSE_INVALID);
+ STR_PASS("\xEF\x80\x80", "\xEF\x80\x80");
+ STR_PASS("\xEF\xBF\xBF", "\xEF\xBF\xBF");
+ STR_FAIL("\xF0", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF0\x90", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF0\x90\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF0\x80\x80\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF0\x8F\x80\x80", SPDK_JSON_PARSE_INVALID);
+ STR_PASS("\xF0\x90\x80\x80", "\xF0\x90\x80\x80");
+ STR_PASS("\xF0\x90\x80\xBF", "\xF0\x90\x80\xBF");
+ STR_PASS("\xF0\x90\xBF\x80", "\xF0\x90\xBF\x80");
+ STR_PASS("\xF0\xBF\x80\x80", "\xF0\xBF\x80\x80");
+ STR_FAIL("\xF0\xC0\x80\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF1", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF1\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF1\x80\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF1\x80\x80\x7F", SPDK_JSON_PARSE_INVALID);
+ STR_PASS("\xF1\x80\x80\x80", "\xF1\x80\x80\x80");
+ STR_PASS("\xF1\x80\x80\xBF", "\xF1\x80\x80\xBF");
+ STR_PASS("\xF1\x80\xBF\x80", "\xF1\x80\xBF\x80");
+ STR_PASS("\xF1\xBF\x80\x80", "\xF1\xBF\x80\x80");
+ STR_PASS("\xF3\x80\x80\x80", "\xF3\x80\x80\x80");
+ STR_FAIL("\xF3\xC0\x80\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF3\x80\xC0\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF3\x80\x80\xC0", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF4", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF4\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF4\x80\x80", SPDK_JSON_PARSE_INVALID);
+ STR_PASS("\xF4\x80\x80\x80", "\xF4\x80\x80\x80");
+ STR_PASS("\xF4\x8F\x80\x80", "\xF4\x8F\x80\x80");
+ STR_PASS("\xF4\x8F\xBF\xBF", "\xF4\x8F\xBF\xBF");
+ STR_FAIL("\xF4\x90\x80\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF5", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF5\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF5\x80\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF5\x80\x80\x80", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\xF5\x80\x80\x80\x80", SPDK_JSON_PARSE_INVALID);
+
+ /* Overlong encodings */
+ STR_FAIL("\xC0\x80", SPDK_JSON_PARSE_INVALID);
+
+ /* Surrogate pairs */
+ STR_FAIL("\xED\xA0\x80", SPDK_JSON_PARSE_INVALID); /* U+D800 First high surrogate */
+ STR_FAIL("\xED\xAF\xBF", SPDK_JSON_PARSE_INVALID); /* U+DBFF Last high surrogate */
+ STR_FAIL("\xED\xB0\x80", SPDK_JSON_PARSE_INVALID); /* U+DC00 First low surrogate */
+ STR_FAIL("\xED\xBF\xBF", SPDK_JSON_PARSE_INVALID); /* U+DFFF Last low surrogate */
+ STR_FAIL("\xED\xA1\x8C\xED\xBE\xB4",
+ SPDK_JSON_PARSE_INVALID); /* U+233B4 (invalid surrogate pair encoding) */
+}
+
+static void
+test_parse_string_escapes_twochar(void)
+{
+ STR_PASS("\\\"", "\"");
+ STR_PASS("\\\\", "\\");
+ STR_PASS("\\/", "/");
+ STR_PASS("\\b", "\b");
+ STR_PASS("\\f", "\f");
+ STR_PASS("\\n", "\n");
+ STR_PASS("\\r", "\r");
+ STR_PASS("\\t", "\t");
+
+ STR_PASS("abc\\tdef", "abc\tdef");
+ STR_PASS("abc\\\"def", "abc\"def");
+
+ /* Backslash at end of string (will be treated as escaped quote) */
+ STR_FAIL("\\", SPDK_JSON_PARSE_INCOMPLETE);
+ STR_FAIL("abc\\", SPDK_JSON_PARSE_INCOMPLETE);
+
+ /* Invalid C-like escapes */
+ STR_FAIL("\\a", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\\v", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\\'", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\\?", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\\0", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\\x00", SPDK_JSON_PARSE_INVALID);
+
+ /* Other invalid escapes */
+ STR_FAIL("\\B", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\\z", SPDK_JSON_PARSE_INVALID);
+}
+
+static void
+test_parse_string_escapes_unicode(void)
+{
+ STR_PASS("\\u0000", "\0");
+ STR_PASS("\\u0001", "\1");
+ STR_PASS("\\u0041", "A");
+ STR_PASS("\\uAAAA", "\xEA\xAA\xAA");
+ STR_PASS("\\uaaaa", "\xEA\xAA\xAA");
+ STR_PASS("\\uAaAa", "\xEA\xAA\xAA");
+
+ STR_FAIL("\\u", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\\u0", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\\u00", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\\u000", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\\u000g", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\\U", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\\U0000", SPDK_JSON_PARSE_INVALID);
+
+ PARSE_FAIL("\"\\u", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("\"\\u0", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("\"\\u00", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("\"\\u000", SPDK_JSON_PARSE_INCOMPLETE);
+
+ /* Surrogate pair */
+ STR_PASS("\\uD834\\uDD1E", "\xF0\x9D\x84\x9E");
+
+ /* Low surrogate without high */
+ STR_FAIL("\\uDC00", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\\uDC00\\uDC00", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\\uDC00abcdef", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\\uDEAD", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("\"\\uD834", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("\"\\uD834\\", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("\"\\uD834\\u", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("\"\\uD834\\uD", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("\"\\uD834\\uDD1", SPDK_JSON_PARSE_INCOMPLETE);
+
+ /* High surrogate without low */
+ STR_FAIL("\\uD800", SPDK_JSON_PARSE_INVALID);
+ STR_FAIL("\\uD800abcdef", SPDK_JSON_PARSE_INVALID);
+
+ /* High surrogate followed by high surrogate */
+ STR_FAIL("\\uD800\\uD800", SPDK_JSON_PARSE_INVALID);
+}
+
+static void
+test_parse_number(void)
+{
+ NUM_PASS("0");
+ NUM_PASS("1");
+ NUM_PASS("100");
+ NUM_PASS("-1");
+ NUM_PASS("-0");
+ NUM_PASS("3.0");
+ NUM_PASS("3.00");
+ NUM_PASS("3.001");
+ NUM_PASS("3.14159");
+ NUM_PASS("3.141592653589793238462643383279");
+ NUM_PASS("1e400");
+ NUM_PASS("1E400");
+ NUM_PASS("0e10");
+ NUM_PASS("0e0");
+ NUM_PASS("-0e0");
+ NUM_PASS("-0e+0");
+ NUM_PASS("-0e-0");
+ NUM_PASS("1e+400");
+ NUM_PASS("1e-400");
+ NUM_PASS("6.022e23");
+ NUM_PASS("-1.234e+56");
+ NUM_PASS("1.23e+56");
+ NUM_PASS("-1.23e-56");
+ NUM_PASS("1.23e-56");
+ NUM_PASS("1e04");
+
+ /* Trailing garbage */
+ PARSE_PASS("0A", 1, "A");
+ VAL_NUMBER("0");
+
+ PARSE_PASS("0,", 1, ",");
+ VAL_NUMBER("0");
+
+ PARSE_PASS("0true", 1, "true");
+ VAL_NUMBER("0");
+
+ PARSE_PASS("00", 1, "0");
+ VAL_NUMBER("0");
+ PARSE_FAIL("[00", SPDK_JSON_PARSE_INVALID);
+
+ PARSE_PASS("007", 1, "07");
+ VAL_NUMBER("0");
+ PARSE_FAIL("[007]", SPDK_JSON_PARSE_INVALID);
+
+ PARSE_PASS("345.678.1", 1, ".1");
+ VAL_NUMBER("345.678");
+ PARSE_FAIL("[345.678.1]", SPDK_JSON_PARSE_INVALID);
+
+ PARSE_PASS("3.2e-4+5", 1, "+5");
+ VAL_NUMBER("3.2e-4");
+ PARSE_FAIL("[3.2e-4+5]", SPDK_JSON_PARSE_INVALID);
+
+ PARSE_PASS("3.4.5", 1, ".5");
+ VAL_NUMBER("3.4");
+ PARSE_FAIL("[3.4.5]", SPDK_JSON_PARSE_INVALID);
+
+ NUM_FAIL("345.", SPDK_JSON_PARSE_INCOMPLETE);
+ NUM_FAIL("+1", SPDK_JSON_PARSE_INVALID);
+ NUM_FAIL("--1", SPDK_JSON_PARSE_INVALID);
+ NUM_FAIL("3.", SPDK_JSON_PARSE_INCOMPLETE);
+ NUM_FAIL("3.+4", SPDK_JSON_PARSE_INVALID);
+ NUM_FAIL("3.2e+-4", SPDK_JSON_PARSE_INVALID);
+ NUM_FAIL("3.2e-+4", SPDK_JSON_PARSE_INVALID);
+ NUM_FAIL("3e+", SPDK_JSON_PARSE_INCOMPLETE);
+ NUM_FAIL("3e-", SPDK_JSON_PARSE_INCOMPLETE);
+ NUM_FAIL("3.e4", SPDK_JSON_PARSE_INVALID);
+ NUM_FAIL("3.2eX", SPDK_JSON_PARSE_INVALID);
+ NUM_FAIL("-", SPDK_JSON_PARSE_INCOMPLETE);
+ NUM_FAIL("NaN", SPDK_JSON_PARSE_INVALID);
+ NUM_FAIL(".123", SPDK_JSON_PARSE_INVALID);
+}
+
+static void
+test_parse_array(void)
+{
+ char buffer[SPDK_JSON_MAX_NESTING_DEPTH + 2] = {0};
+
+ PARSE_PASS("[]", 2, "");
+ VAL_ARRAY_BEGIN(0);
+ VAL_ARRAY_END();
+
+ PARSE_PASS("[true]", 3, "");
+ VAL_ARRAY_BEGIN(1);
+ VAL_TRUE();
+ VAL_ARRAY_END();
+
+ PARSE_PASS("[true, false]", 4, "");
+ VAL_ARRAY_BEGIN(2);
+ VAL_TRUE();
+ VAL_FALSE();
+ VAL_ARRAY_END();
+
+ PARSE_PASS("[\"hello\"]", 3, "");
+ VAL_ARRAY_BEGIN(1);
+ VAL_STRING("hello");
+ VAL_ARRAY_END();
+
+ PARSE_PASS("[[]]", 4, "");
+ VAL_ARRAY_BEGIN(2);
+ VAL_ARRAY_BEGIN(0);
+ VAL_ARRAY_END();
+ VAL_ARRAY_END();
+
+ PARSE_PASS("[\"hello\", \"world\"]", 4, "");
+ VAL_ARRAY_BEGIN(2);
+ VAL_STRING("hello");
+ VAL_STRING("world");
+ VAL_ARRAY_END();
+
+ PARSE_PASS("[],", 2, ",");
+ VAL_ARRAY_BEGIN(0);
+ VAL_ARRAY_END();
+
+ PARSE_FAIL("]", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("[", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("[true", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("[\"hello", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("[\"hello\"", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("[true,]", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("[,]", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("[,true]", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("[true}", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("[true,,true]", SPDK_JSON_PARSE_INVALID);
+
+ /* Nested arrays exactly up to the allowed nesting depth */
+ memset(buffer, '[', SPDK_JSON_MAX_NESTING_DEPTH);
+ buffer[SPDK_JSON_MAX_NESTING_DEPTH] = ' ';
+ PARSE_FAIL(buffer, SPDK_JSON_PARSE_INCOMPLETE);
+
+ /* Nested arrays exceeding the maximum allowed nesting depth for this implementation */
+ buffer[SPDK_JSON_MAX_NESTING_DEPTH] = '[';
+ PARSE_FAIL(buffer, SPDK_JSON_PARSE_MAX_DEPTH_EXCEEDED);
+}
+
+static void
+test_parse_object(void)
+{
+ PARSE_PASS("{}", 2, "");
+ VAL_OBJECT_BEGIN(0);
+ VAL_OBJECT_END();
+
+ PARSE_PASS("{\"a\": true}", 4, "");
+ VAL_OBJECT_BEGIN(2);
+ VAL_NAME("a");
+ VAL_TRUE();
+ VAL_OBJECT_END();
+
+ PARSE_PASS("{\"abc\": \"def\"}", 4, "");
+ VAL_OBJECT_BEGIN(2);
+ VAL_NAME("abc");
+ VAL_STRING("def");
+ VAL_OBJECT_END();
+
+ PARSE_PASS("{\"a\": true, \"b\": false}", 6, "");
+ VAL_OBJECT_BEGIN(4);
+ VAL_NAME("a");
+ VAL_TRUE();
+ VAL_NAME("b");
+ VAL_FALSE();
+ VAL_OBJECT_END();
+
+ PARSE_PASS("{\"a\": { \"b\": true } }", 7, "");
+ VAL_OBJECT_BEGIN(5);
+ VAL_NAME("a");
+ VAL_OBJECT_BEGIN(2);
+ VAL_NAME("b");
+ VAL_TRUE();
+ VAL_OBJECT_END();
+ VAL_OBJECT_END();
+
+ PARSE_PASS("{\"{test\": 0}", 4, "");
+ VAL_OBJECT_BEGIN(2);
+ VAL_NAME("{test");
+ VAL_NUMBER("0");
+ VAL_OBJECT_END();
+
+ PARSE_PASS("{\"test}\": 1}", 4, "");
+ VAL_OBJECT_BEGIN(2);
+ VAL_NAME("test}");
+ VAL_NUMBER("1");
+ VAL_OBJECT_END();
+
+ PARSE_PASS("{\"\\\"\": 2}", 4, "");
+ VAL_OBJECT_BEGIN(2);
+ VAL_NAME("\"");
+ VAL_NUMBER("2");
+ VAL_OBJECT_END();
+
+ PARSE_PASS("{\"a\":true},", 4, ",");
+ VAL_OBJECT_BEGIN(2);
+ VAL_NAME("a");
+ VAL_TRUE();
+ VAL_OBJECT_END();
+
+ /* Object end without object begin (trailing garbage) */
+ PARSE_PASS("true}", 1, "}");
+ VAL_TRUE();
+
+ PARSE_PASS("0}", 1, "}");
+ VAL_NUMBER("0");
+
+ PARSE_PASS("\"a\"}", 1, "}");
+ VAL_STRING("a");
+
+ PARSE_FAIL("}", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("{", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("{\"a", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("{\"a\"", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("{\"a\":", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("{\"a\":true", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("{\"a\":true,", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("{\"a\":true]", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("{\"a\":true,}", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("{\"a\":true,\"}", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("{\"a\":true,\"b}", SPDK_JSON_PARSE_INCOMPLETE);
+ PARSE_FAIL("{\"a\":true,\"b\"}", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("{\"a\":true,\"b\":}", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("{\"a\":true,\"b\",}", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("{\"a\",}", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("{,\"a\": true}", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("{a:true}", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("{'a':true}", SPDK_JSON_PARSE_INVALID);
+}
+
+static void
+test_parse_nesting(void)
+{
+ PARSE_PASS("[[[[[[[[]]]]]]]]", 16, "");
+
+ PARSE_PASS("{\"a\": [0, 1, 2]}", 8, "");
+ VAL_OBJECT_BEGIN(6);
+ VAL_NAME("a");
+ VAL_ARRAY_BEGIN(3);
+ VAL_NUMBER("0");
+ VAL_NUMBER("1");
+ VAL_NUMBER("2");
+ VAL_ARRAY_END();
+ VAL_OBJECT_END();
+
+ PARSE_PASS("{\"a\": [0, 1, 2], \"b\": 3 }", 10, "");
+ VAL_OBJECT_BEGIN(8);
+ VAL_NAME("a");
+ VAL_ARRAY_BEGIN(3);
+ VAL_NUMBER("0");
+ VAL_NUMBER("1");
+ VAL_NUMBER("2");
+ VAL_ARRAY_END();
+ VAL_NAME("b");
+ VAL_NUMBER("3");
+ VAL_OBJECT_END();
+
+ PARSE_PASS("[0, 1, {\"a\": 3}, 4, 5]", 10, "");
+ VAL_ARRAY_BEGIN(8);
+ VAL_NUMBER("0");
+ VAL_NUMBER("1");
+ VAL_OBJECT_BEGIN(2);
+ VAL_NAME("a");
+ VAL_NUMBER("3");
+ VAL_OBJECT_END();
+ VAL_NUMBER("4");
+ VAL_NUMBER("5");
+ VAL_ARRAY_END();
+
+ PARSE_PASS("\t[ { \"a\": {\"b\": [ {\"c\": 1}, 2 ],\n\"d\": 3}, \"e\" : 4}, 5 ] ", 20, "");
+ VAL_ARRAY_BEGIN(18);
+ VAL_OBJECT_BEGIN(15);
+ VAL_NAME("a");
+ VAL_OBJECT_BEGIN(10);
+ VAL_NAME("b");
+ VAL_ARRAY_BEGIN(5);
+ VAL_OBJECT_BEGIN(2);
+ VAL_NAME("c");
+ VAL_NUMBER("1");
+ VAL_OBJECT_END();
+ VAL_NUMBER("2");
+ VAL_ARRAY_END();
+ VAL_NAME("d");
+ VAL_NUMBER("3");
+ VAL_OBJECT_END();
+ VAL_NAME("e");
+ VAL_NUMBER("4");
+ VAL_OBJECT_END();
+ VAL_NUMBER("5");
+ VAL_ARRAY_END();
+
+ /* Examples from RFC 7159 */
+ PARSE_PASS(
+ "{\n"
+ " \"Image\": {\n"
+ " \"Width\": 800,\n"
+ " \"Height\": 600,\n"
+ " \"Title\": \"View from 15th Floor\",\n"
+ " \"Thumbnail\": {\n"
+ " \"Url\": \"http://www.example.com/image/481989943\",\n"
+ " \"Height\": 125,\n"
+ " \"Width\": 100\n"
+ " },\n"
+ " \"Animated\" : false,\n"
+ " \"IDs\": [116, 943, 234, 38793]\n"
+ " }\n"
+ "}\n",
+ 29, "");
+
+ VAL_OBJECT_BEGIN(27);
+ VAL_NAME("Image");
+ VAL_OBJECT_BEGIN(24);
+ VAL_NAME("Width");
+ VAL_NUMBER("800");
+ VAL_NAME("Height");
+ VAL_NUMBER("600");
+ VAL_NAME("Title");
+ VAL_STRING("View from 15th Floor");
+ VAL_NAME("Thumbnail");
+ VAL_OBJECT_BEGIN(6);
+ VAL_NAME("Url");
+ VAL_STRING("http://www.example.com/image/481989943");
+ VAL_NAME("Height");
+ VAL_NUMBER("125");
+ VAL_NAME("Width");
+ VAL_NUMBER("100");
+ VAL_OBJECT_END();
+ VAL_NAME("Animated");
+ VAL_FALSE();
+ VAL_NAME("IDs");
+ VAL_ARRAY_BEGIN(4);
+ VAL_NUMBER("116");
+ VAL_NUMBER("943");
+ VAL_NUMBER("234");
+ VAL_NUMBER("38793");
+ VAL_ARRAY_END();
+ VAL_OBJECT_END();
+ VAL_OBJECT_END();
+
+ PARSE_PASS(
+ "[\n"
+ " {\n"
+ " \"precision\": \"zip\",\n"
+ " \"Latitude\": 37.7668,\n"
+ " \"Longitude\": -122.3959,\n"
+ " \"Address\": \"\",\n"
+ " \"City\": \"SAN FRANCISCO\",\n"
+ " \"State\": \"CA\",\n"
+ " \"Zip\": \"94107\",\n"
+ " \"Country\": \"US\"\n"
+ " },\n"
+ " {\n"
+ " \"precision\": \"zip\",\n"
+ " \"Latitude\": 37.371991,\n"
+ " \"Longitude\": -122.026020,\n"
+ " \"Address\": \"\",\n"
+ " \"City\": \"SUNNYVALE\",\n"
+ " \"State\": \"CA\",\n"
+ " \"Zip\": \"94085\",\n"
+ " \"Country\": \"US\"\n"
+ " }\n"
+ "]",
+ 38, "");
+
+ VAL_ARRAY_BEGIN(36);
+ VAL_OBJECT_BEGIN(16);
+ VAL_NAME("precision");
+ VAL_STRING("zip");
+ VAL_NAME("Latitude");
+ VAL_NUMBER("37.7668");
+ VAL_NAME("Longitude");
+ VAL_NUMBER("-122.3959");
+ VAL_NAME("Address");
+ VAL_STRING("");
+ VAL_NAME("City");
+ VAL_STRING("SAN FRANCISCO");
+ VAL_NAME("State");
+ VAL_STRING("CA");
+ VAL_NAME("Zip");
+ VAL_STRING("94107");
+ VAL_NAME("Country");
+ VAL_STRING("US");
+ VAL_OBJECT_END();
+ VAL_OBJECT_BEGIN(16);
+ VAL_NAME("precision");
+ VAL_STRING("zip");
+ VAL_NAME("Latitude");
+ VAL_NUMBER("37.371991");
+ VAL_NAME("Longitude");
+ VAL_NUMBER("-122.026020");
+ VAL_NAME("Address");
+ VAL_STRING("");
+ VAL_NAME("City");
+ VAL_STRING("SUNNYVALE");
+ VAL_NAME("State");
+ VAL_STRING("CA");
+ VAL_NAME("Zip");
+ VAL_STRING("94085");
+ VAL_NAME("Country");
+ VAL_STRING("US");
+ VAL_OBJECT_END();
+ VAL_ARRAY_END();
+
+ /* Trailing garbage */
+ PARSE_PASS("{\"a\": [0, 1, 2]}]", 8, "]");
+ VAL_OBJECT_BEGIN(6);
+ VAL_NAME("a");
+ VAL_ARRAY_BEGIN(3);
+ VAL_NUMBER("0");
+ VAL_NUMBER("1");
+ VAL_NUMBER("2");
+ VAL_ARRAY_END();
+ VAL_OBJECT_END();
+
+ PARSE_PASS("{\"a\": [0, 1, 2]}}", 8, "}");
+ PARSE_PASS("{\"a\": [0, 1, 2]}]", 8, "]");
+ VAL_OBJECT_BEGIN(6);
+ VAL_NAME("a");
+ VAL_ARRAY_BEGIN(3);
+ VAL_NUMBER("0");
+ VAL_NUMBER("1");
+ VAL_NUMBER("2");
+ VAL_ARRAY_END();
+ VAL_OBJECT_END();
+
+ PARSE_FAIL("{\"a\": [0, 1, 2}]", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("{\"a\": [0, 1, 2]", SPDK_JSON_PARSE_INCOMPLETE);
+}
+
+
+static void
+test_parse_comment(void)
+{
+ /* Comments are not allowed by the JSON RFC */
+ PARSE_PASS("[0]", 3, "");
+ PARSE_FAIL("/* test */[0]", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("[/* test */0]", SPDK_JSON_PARSE_INVALID);
+ PARSE_FAIL("[0/* test */]", SPDK_JSON_PARSE_INVALID);
+
+ /*
+ * This is allowed since the parser stops once it reads a complete JSON object.
+ * The next parse call would fail (see tests above) when parsing the comment.
+ */
+ PARSE_PASS("[0]/* test */", 3, "/* test */");
+
+ /*
+ * Test with non-standard comments enabled.
+ */
+ PARSE_PASS_FLAGS("/* test */[0]", 3, "", SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS);
+ VAL_ARRAY_BEGIN(1);
+ VAL_NUMBER("0");
+ VAL_ARRAY_END();
+
+ PARSE_PASS_FLAGS("[/* test */0]", 3, "", SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS);
+ VAL_ARRAY_BEGIN(1);
+ VAL_NUMBER("0");
+ VAL_ARRAY_END();
+
+ PARSE_PASS_FLAGS("[0/* test */]", 3, "", SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS);
+ VAL_ARRAY_BEGIN(1);
+ VAL_NUMBER("0");
+ VAL_ARRAY_END();
+
+ PARSE_FAIL_FLAGS("/* test */", SPDK_JSON_PARSE_INCOMPLETE, SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS);
+ PARSE_FAIL_FLAGS("[/* test */", SPDK_JSON_PARSE_INCOMPLETE, SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS);
+ PARSE_FAIL_FLAGS("[0/* test */", SPDK_JSON_PARSE_INCOMPLETE, SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS);
+
+ /*
+ * Single-line comments
+ */
+ PARSE_PASS_FLAGS("// test\n0", 1, "", SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS);
+ VAL_NUMBER("0");
+
+ PARSE_PASS_FLAGS("// test\r\n0", 1, "", SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS);
+ VAL_NUMBER("0");
+
+ PARSE_PASS_FLAGS("// [0] test\n0", 1, "", SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS);
+ VAL_NUMBER("0");
+
+ PARSE_FAIL_FLAGS("//", SPDK_JSON_PARSE_INCOMPLETE, SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS);
+ PARSE_FAIL_FLAGS("// test", SPDK_JSON_PARSE_INCOMPLETE, SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS);
+ PARSE_FAIL_FLAGS("//\n", SPDK_JSON_PARSE_INCOMPLETE, SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS);
+
+ /* Invalid character following slash */
+ PARSE_FAIL_FLAGS("[0/x", SPDK_JSON_PARSE_INVALID, SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS);
+
+ /* Single slash at end of buffer */
+ PARSE_FAIL_FLAGS("[0/", SPDK_JSON_PARSE_INCOMPLETE, SPDK_JSON_PARSE_FLAG_ALLOW_COMMENTS);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("json", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "parse_literal", test_parse_literal) == NULL ||
+ CU_add_test(suite, "parse_string_simple", test_parse_string_simple) == NULL ||
+ CU_add_test(suite, "parse_string_control_chars", test_parse_string_control_chars) == NULL ||
+ CU_add_test(suite, "parse_string_utf8", test_parse_string_utf8) == NULL ||
+ CU_add_test(suite, "parse_string_escapes_twochar", test_parse_string_escapes_twochar) == NULL ||
+ CU_add_test(suite, "parse_string_escapes_unicode", test_parse_string_escapes_unicode) == NULL ||
+ CU_add_test(suite, "parse_number", test_parse_number) == NULL ||
+ CU_add_test(suite, "parse_array", test_parse_array) == NULL ||
+ CU_add_test(suite, "parse_object", test_parse_object) == NULL ||
+ CU_add_test(suite, "parse_nesting", test_parse_nesting) == NULL ||
+ CU_add_test(suite, "parse_comment", test_parse_comment) == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+
+ CU_basic_run_tests();
+
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/json/json_util.c/.gitignore b/src/spdk/test/unit/lib/json/json_util.c/.gitignore
new file mode 100644
index 00000000..02f6d50c
--- /dev/null
+++ b/src/spdk/test/unit/lib/json/json_util.c/.gitignore
@@ -0,0 +1 @@
+json_util_ut
diff --git a/src/spdk/test/unit/lib/json/json_util.c/Makefile b/src/spdk/test/unit/lib/json/json_util.c/Makefile
new file mode 100644
index 00000000..c9a28208
--- /dev/null
+++ b/src/spdk/test/unit/lib/json/json_util.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = json_util_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/json/json_util.c/json_util_ut.c b/src/spdk/test/unit/lib/json/json_util.c/json_util_ut.c
new file mode 100644
index 00000000..203b744e
--- /dev/null
+++ b/src/spdk/test/unit/lib/json/json_util.c/json_util_ut.c
@@ -0,0 +1,963 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "json/json_util.c"
+
+/* For spdk_json_parse() */
+#include "json/json_parse.c"
+
+#define NUM_SETUP(x) \
+ snprintf(buf, sizeof(buf), "%s", x); \
+ v.type = SPDK_JSON_VAL_NUMBER; \
+ v.start = buf; \
+ v.len = sizeof(x) - 1
+
+#define NUM_UINT16_PASS(s, i) \
+ NUM_SETUP(s); \
+ CU_ASSERT(spdk_json_number_to_uint16(&v, &u16) == 0); \
+ CU_ASSERT(u16 == i)
+
+#define NUM_UINT16_FAIL(s) \
+ NUM_SETUP(s); \
+ CU_ASSERT(spdk_json_number_to_uint16(&v, &u16) != 0)
+
+#define NUM_INT32_PASS(s, i) \
+ NUM_SETUP(s); \
+ CU_ASSERT(spdk_json_number_to_int32(&v, &i32) == 0); \
+ CU_ASSERT(i32 == i)
+
+#define NUM_INT32_FAIL(s) \
+ NUM_SETUP(s); \
+ CU_ASSERT(spdk_json_number_to_int32(&v, &i32) != 0)
+
+#define NUM_UINT64_PASS(s, i) \
+ NUM_SETUP(s); \
+ CU_ASSERT(spdk_json_number_to_uint64(&v, &u64) == 0); \
+ CU_ASSERT(u64 == i)
+
+#define NUM_UINT64_FAIL(s) \
+ NUM_SETUP(s); \
+ CU_ASSERT(spdk_json_number_to_uint64(&v, &u64) != 0)
+
+static void
+test_strequal(void)
+{
+ struct spdk_json_val v;
+
+ v.type = SPDK_JSON_VAL_STRING;
+ v.start = "test";
+ v.len = sizeof("test") - 1;
+ CU_ASSERT(spdk_json_strequal(&v, "test") == true);
+ CU_ASSERT(spdk_json_strequal(&v, "TEST") == false);
+ CU_ASSERT(spdk_json_strequal(&v, "hello") == false);
+ CU_ASSERT(spdk_json_strequal(&v, "t") == false);
+
+ v.type = SPDK_JSON_VAL_NAME;
+ CU_ASSERT(spdk_json_strequal(&v, "test") == true);
+
+ v.type = SPDK_JSON_VAL_NUMBER;
+ CU_ASSERT(spdk_json_strequal(&v, "test") == false);
+
+ v.type = SPDK_JSON_VAL_STRING;
+ v.start = "test\0hello";
+ v.len = sizeof("test\0hello") - 1;
+ CU_ASSERT(spdk_json_strequal(&v, "test") == false);
+}
+
+static void
+test_num_to_uint16(void)
+{
+ struct spdk_json_val v;
+ char buf[100];
+ uint16_t u16 = 0;
+
+ NUM_SETUP("1234");
+ CU_ASSERT(spdk_json_number_to_uint16(&v, &u16) == 0);
+ CU_ASSERT(u16 == 1234);
+
+ NUM_UINT16_PASS("0", 0);
+ NUM_UINT16_PASS("1234", 1234);
+ NUM_UINT16_PASS("1234.00000", 1234);
+ NUM_UINT16_PASS("1.2e1", 12);
+ NUM_UINT16_PASS("12340e-1", 1234);
+
+ NUM_UINT16_FAIL("1.2");
+ NUM_UINT16_FAIL("-1234");
+ NUM_UINT16_FAIL("1.2E0");
+ NUM_UINT16_FAIL("1.234e1");
+ NUM_UINT16_FAIL("12341e-1");
+}
+
+static void
+test_num_to_int32(void)
+{
+ struct spdk_json_val v;
+ char buf[100];
+ int32_t i32 = 0;
+
+ NUM_SETUP("1234");
+ CU_ASSERT(spdk_json_number_to_int32(&v, &i32) == 0);
+ CU_ASSERT(i32 == 1234);
+
+
+ NUM_INT32_PASS("0", 0);
+ NUM_INT32_PASS("1234", 1234);
+ NUM_INT32_PASS("-1234", -1234);
+ NUM_INT32_PASS("1234.00000", 1234);
+ NUM_INT32_PASS("1.2e1", 12);
+ NUM_INT32_PASS("12340e-1", 1234);
+ NUM_INT32_PASS("-0", 0);
+
+ NUM_INT32_FAIL("1.2");
+ NUM_INT32_FAIL("1.2E0");
+ NUM_INT32_FAIL("1.234e1");
+ NUM_INT32_FAIL("12341e-1");
+}
+
+static void
+test_num_to_uint64(void)
+{
+ struct spdk_json_val v;
+ char buf[100];
+ uint64_t u64 = 0;
+
+ NUM_SETUP("1234");
+ CU_ASSERT(spdk_json_number_to_uint64(&v, &u64) == 0);
+ CU_ASSERT(u64 == 1234);
+
+
+ NUM_UINT64_PASS("0", 0);
+ NUM_UINT64_PASS("1234", 1234);
+ NUM_UINT64_PASS("1234.00000", 1234);
+ NUM_UINT64_PASS("1.2e1", 12);
+ NUM_UINT64_PASS("12340e-1", 1234);
+ NUM_UINT64_PASS("123456780e-1", 12345678);
+
+ NUM_UINT64_FAIL("1.2");
+ NUM_UINT64_FAIL("-1234");
+ NUM_UINT64_FAIL("1.2E0");
+ NUM_UINT64_FAIL("1.234e1");
+ NUM_UINT64_FAIL("12341e-1");
+ NUM_UINT64_FAIL("123456781e-1");
+}
+
+static void
+test_decode_object(void)
+{
+ struct my_object {
+ char *my_name;
+ uint32_t my_int;
+ bool my_bool;
+ };
+ struct spdk_json_val object[] = {
+ {"", 6, SPDK_JSON_VAL_OBJECT_BEGIN},
+ {"first", 5, SPDK_JSON_VAL_NAME},
+ {"HELLO", 5, SPDK_JSON_VAL_STRING},
+ {"second", 6, SPDK_JSON_VAL_NAME},
+ {"234", 3, SPDK_JSON_VAL_NUMBER},
+ {"third", 5, SPDK_JSON_VAL_NAME},
+ {"", 1, SPDK_JSON_VAL_TRUE},
+ {"", 0, SPDK_JSON_VAL_OBJECT_END},
+ };
+
+ struct spdk_json_object_decoder decoders[] = {
+ {"first", offsetof(struct my_object, my_name), spdk_json_decode_string, false},
+ {"second", offsetof(struct my_object, my_int), spdk_json_decode_uint32, false},
+ {"third", offsetof(struct my_object, my_bool), spdk_json_decode_bool, false},
+ {"fourth", offsetof(struct my_object, my_bool), spdk_json_decode_bool, true},
+ };
+ struct my_object output = {
+ .my_name = NULL,
+ .my_int = 0,
+ .my_bool = false,
+ };
+ uint32_t answer = 234;
+ char *answer_str = "HELLO";
+ bool answer_bool = true;
+
+ /* Passing Test: object containing simple types */
+ CU_ASSERT(spdk_json_decode_object(object, decoders, 4, &output) == 0);
+ SPDK_CU_ASSERT_FATAL(output.my_name != NULL);
+ CU_ASSERT(memcmp(output.my_name, answer_str, 6) == 0);
+ CU_ASSERT(output.my_int == answer);
+ CU_ASSERT(output.my_bool == answer_bool);
+
+ /* Failing Test: member with no matching decoder */
+ /* i.e. I remove the matching decoder from the boolean argument */
+ CU_ASSERT(spdk_json_decode_object(object, decoders, 2, &output) != 0);
+
+ /* Failing Test: non-optional decoder with no corresponding member */
+
+ decoders[3].optional = false;
+ CU_ASSERT(spdk_json_decode_object(object, decoders, 4, &output) != 0);
+
+ /* return to base state */
+ decoders[3].optional = true;
+
+ /* Failing Test: duplicated names for json values */
+ object[3].start = "first";
+ object[3].len = 5;
+ CU_ASSERT(spdk_json_decode_object(object, decoders, 3, &output) != 0);
+
+ /* return to base state */
+ object[3].start = "second";
+ object[3].len = 6;
+
+ /* Failing Test: invalid value for decoder */
+ object[2].start = "HELO";
+ CU_ASSERT(spdk_json_decode_object(object, decoders, 3, &output) != 0);
+
+ /* return to base state */
+ object[2].start = "HELLO";
+
+ /* Failing Test: not an object */
+ object[0].type = SPDK_JSON_VAL_ARRAY_BEGIN;
+ CU_ASSERT(spdk_json_decode_object(object, decoders, 3, &output) != 0);
+
+ free(output.my_name);
+}
+
+static void
+test_decode_array(void)
+{
+ struct spdk_json_val values[4];
+ uint32_t my_int[2] = {0, 0};
+ char *my_string[2] = {NULL, NULL};
+ size_t out_size;
+
+ /* passing integer test */
+ values[0].type = SPDK_JSON_VAL_ARRAY_BEGIN;
+ values[0].len = 2;
+ values[1].type = SPDK_JSON_VAL_NUMBER;
+ values[1].len = 4;
+ values[1].start = "1234";
+ values[2].type = SPDK_JSON_VAL_NUMBER;
+ values[2].len = 4;
+ values[2].start = "5678";
+ values[3].type = SPDK_JSON_VAL_ARRAY_END;
+ CU_ASSERT(spdk_json_decode_array(values, spdk_json_decode_uint32, my_int, 2, &out_size,
+ sizeof(uint32_t)) == 0);
+ CU_ASSERT(my_int[0] == 1234);
+ CU_ASSERT(my_int[1] == 5678);
+ CU_ASSERT(out_size == 2);
+
+ /* array length exceeds max */
+ values[0].len = 3;
+ CU_ASSERT(spdk_json_decode_array(values, spdk_json_decode_uint32, my_int, 2, &out_size,
+ sizeof(uint32_t)) != 0);
+
+ /* mixed types */
+ values[0].len = 2;
+ values[2].type = SPDK_JSON_VAL_STRING;
+ values[2].len = 5;
+ values[2].start = "HELLO";
+ CU_ASSERT(spdk_json_decode_array(values, spdk_json_decode_uint32, my_int, 2, &out_size,
+ sizeof(uint32_t)) != 0);
+
+ /* no array start */
+ values[0].type = SPDK_JSON_VAL_NUMBER;
+ values[2].type = SPDK_JSON_VAL_NUMBER;
+ values[2].len = 4;
+ values[2].start = "5678";
+ CU_ASSERT(spdk_json_decode_array(values, spdk_json_decode_uint32, my_int, 2, &out_size,
+ sizeof(uint32_t)) != 0);
+
+ /* mismatched array type and parser */
+ values[0].type = SPDK_JSON_VAL_ARRAY_BEGIN;
+ values[1].type = SPDK_JSON_VAL_STRING;
+ values[1].len = 5;
+ values[1].start = "HELLO";
+ values[2].type = SPDK_JSON_VAL_STRING;
+ values[2].len = 5;
+ values[2].start = "WORLD";
+ CU_ASSERT(spdk_json_decode_array(values, spdk_json_decode_uint32, my_int, 2, &out_size,
+ sizeof(uint32_t)) != 0);
+
+ /* passing String example */
+ CU_ASSERT(spdk_json_decode_array(values, spdk_json_decode_string, my_string, 2, &out_size,
+ sizeof(char *)) == 0);
+ SPDK_CU_ASSERT_FATAL(my_string[0] != NULL);
+ SPDK_CU_ASSERT_FATAL(my_string[1] != NULL);
+ CU_ASSERT(memcmp(my_string[0], "HELLO", 6) == 0);
+ CU_ASSERT(memcmp(my_string[1], "WORLD", 6) == 0);
+ CU_ASSERT(out_size == 2);
+
+ free(my_string[0]);
+ free(my_string[1]);
+}
+
+static void
+test_decode_bool(void)
+{
+ struct spdk_json_val v;
+ bool b;
+
+ /* valid bool (true) */
+ v.type = SPDK_JSON_VAL_TRUE;
+ b = false;
+ CU_ASSERT(spdk_json_decode_bool(&v, &b) == 0);
+ CU_ASSERT(b == true);
+
+ /* valid bool (false) */
+ v.type = SPDK_JSON_VAL_FALSE;
+ b = true;
+ CU_ASSERT(spdk_json_decode_bool(&v, &b) == 0);
+ CU_ASSERT(b == false);
+
+ /* incorrect type */
+ v.type = SPDK_JSON_VAL_NULL;
+ CU_ASSERT(spdk_json_decode_bool(&v, &b) != 0);
+}
+
+static void
+test_decode_int32(void)
+{
+ struct spdk_json_val v;
+ int32_t i;
+
+ /* correct type and valid value */
+ v.type = SPDK_JSON_VAL_NUMBER;
+ v.start = "33";
+ v.len = 2;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_int32(&v, &i) == 0);
+ CU_ASSERT(i == 33)
+
+ /* correct type and invalid value (float) */
+ v.start = "32.45";
+ v.len = 5;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_int32(&v, &i) != 0);
+
+ /* incorrect type */
+ v.type = SPDK_JSON_VAL_STRING;
+ v.start = "String";
+ v.len = 6;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_int32(&v, &i) != 0);
+
+ /* incorrect type */
+ v.type = SPDK_JSON_VAL_TRUE;
+ CU_ASSERT(spdk_json_decode_int32(&v, &i) != 0);
+
+ /* edge case (integer max) */
+ v.type = SPDK_JSON_VAL_NUMBER;
+ v.start = "2147483647";
+ v.len = 10;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_int32(&v, &i) == 0);
+ CU_ASSERT(i == 2147483647);
+
+ /* invalid value (overflow) */
+ v.start = "2147483648";
+ i = 0;
+ CU_ASSERT(spdk_json_decode_int32(&v, &i) != 0);
+
+ /* edge case (integer min) */
+ v.type = SPDK_JSON_VAL_NUMBER;
+ v.start = "-2147483648";
+ v.len = 11;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_int32(&v, &i) == 0);
+ CU_ASSERT(i == -2147483648);
+
+ /* invalid value (overflow) */
+ v.start = "-2147483649";
+ CU_ASSERT(spdk_json_decode_int32(&v, &i) != 0);
+
+ /* valid exponent */
+ v.start = "4e3";
+ v.len = 3;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_int32(&v, &i) == 0);
+ CU_ASSERT(i == 4000);
+
+ /* invalid negative exponent */
+ v.start = "-400e-4";
+ v.len = 7;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_int32(&v, &i) != 0);
+
+ /* invalid negative exponent */
+ v.start = "400e-4";
+ v.len = 6;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_int32(&v, &i) != 0);
+
+ /* valid negative exponent */
+ v.start = "-400e-2";
+ v.len = 7;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_int32(&v, &i) == 0);
+ CU_ASSERT(i == -4)
+
+ /* invalid exponent (overflow) */
+ v.start = "-2e32";
+ v.len = 5;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_int32(&v, &i) != 0);
+
+ /* valid exponent with decimal */
+ v.start = "2.13e2";
+ v.len = 6;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_int32(&v, &i) == 0);
+ CU_ASSERT(i == 213)
+
+ /* invalid exponent with decimal */
+ v.start = "2.134e2";
+ v.len = 7;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_int32(&v, &i) != 0);
+}
+
+static void
+test_decode_uint16(void)
+{
+ struct spdk_json_val v;
+ uint32_t i;
+
+ /* incorrect type */
+ v.type = SPDK_JSON_VAL_STRING;
+ v.start = "Strin";
+ v.len = 5;
+ CU_ASSERT(spdk_json_decode_uint16(&v, &i) != 0);
+
+ /* invalid value (float) */
+ v.type = SPDK_JSON_VAL_NUMBER;
+ v.start = "123.4";
+ v.len = 5;
+ CU_ASSERT(spdk_json_decode_uint16(&v, &i) != 0);
+
+ /* edge case (0) */
+ v.start = "0";
+ v.len = 1;
+ i = 456;
+ CU_ASSERT(spdk_json_decode_uint16(&v, &i) == 0);
+ CU_ASSERT(i == 0);
+
+ /* invalid value (negative) */
+ v.start = "-1";
+ v.len = 2;
+ CU_ASSERT(spdk_json_decode_uint16(&v, &i) != 0);
+
+ /* edge case (maximum) */
+ v.start = "65535";
+ v.len = 5;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint16(&v, &i) == 0);
+ CU_ASSERT(i == 65535);
+
+ /* invalid value (overflow) */
+ v.start = "65536";
+ v.len = 5;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint16(&v, &i) != 0);
+
+ /* valid exponent */
+ v.start = "66E2";
+ v.len = 4;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint16(&v, &i) == 0);
+ CU_ASSERT(i == 6600);
+
+ /* invalid exponent (overflow) */
+ v.start = "66E3";
+ v.len = 4;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint16(&v, &i) != 0);
+
+ /* invalid exponent (decimal) */
+ v.start = "65.535E2";
+ v.len = 7;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint16(&v, &i) != 0);
+
+ /* valid exponent with decimal */
+ v.start = "65.53E2";
+ v.len = 7;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint16(&v, &i) == 0);
+ CU_ASSERT(i == 6553);
+
+ /* invalid negative exponent */
+ v.start = "40e-2";
+ v.len = 5;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint16(&v, &i) != 0);
+
+ /* invalid negative exponent */
+ v.start = "-40e-1";
+ v.len = 6;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint16(&v, &i) != 0);
+
+ /* valid negative exponent */
+ v.start = "40e-1";
+ v.len = 5;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint16(&v, &i) == 0);
+ CU_ASSERT(i == 4);
+}
+
+static void
+test_decode_uint32(void)
+{
+ struct spdk_json_val v;
+ uint32_t i;
+
+ /* incorrect type */
+ v.type = SPDK_JSON_VAL_STRING;
+ v.start = "String";
+ v.len = 6;
+ CU_ASSERT(spdk_json_decode_uint32(&v, &i) != 0);
+
+ /* invalid value (float) */
+ v.type = SPDK_JSON_VAL_NUMBER;
+ v.start = "123.45";
+ v.len = 6;
+ CU_ASSERT(spdk_json_decode_uint32(&v, &i) != 0);
+
+ /* edge case (0) */
+ v.start = "0";
+ v.len = 1;
+ i = 456;
+ CU_ASSERT(spdk_json_decode_uint32(&v, &i) == 0);
+ CU_ASSERT(i == 0);
+
+ /* invalid value (negative) */
+ v.start = "-1";
+ v.len = 2;
+ CU_ASSERT(spdk_json_decode_uint32(&v, &i) != 0);
+
+ /* edge case (maximum) */
+ v.start = "4294967295";
+ v.len = 10;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint32(&v, &i) == 0);
+ CU_ASSERT(i == 4294967295);
+
+ /* invalid value (overflow) */
+ v.start = "4294967296";
+ v.len = 10;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint32(&v, &i) != 0);
+
+ /* valid exponent */
+ v.start = "42E2";
+ v.len = 4;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint32(&v, &i) == 0);
+ CU_ASSERT(i == 4200);
+
+ /* invalid exponent (overflow) */
+ v.start = "42e32";
+ v.len = 5;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint32(&v, &i) != 0);
+
+ /* invalid exponent (decimal) */
+ v.start = "42.323E2";
+ v.len = 8;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint32(&v, &i) != 0);
+
+ /* valid exponent with decimal */
+ v.start = "42.32E2";
+ v.len = 7;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint32(&v, &i) == 0);
+ CU_ASSERT(i == 4232);
+
+ /* invalid negative exponent */
+ v.start = "400e-4";
+ v.len = 6;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint32(&v, &i) != 0);
+
+ /* invalid negative exponent */
+ v.start = "-400e-2";
+ v.len = 7;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint32(&v, &i) != 0);
+
+ /* valid negative exponent */
+ v.start = "400e-2";
+ v.len = 6;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint32(&v, &i) == 0);
+ CU_ASSERT(i == 4);
+
+ /* valid negative exponent */
+ v.start = "10e-1";
+ v.len = 5;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint32(&v, &i) == 0);
+ CU_ASSERT(i == 1)
+}
+
+static void
+test_decode_uint64(void)
+{
+ struct spdk_json_val v;
+ uint64_t i;
+
+ /* incorrect type */
+ v.type = SPDK_JSON_VAL_STRING;
+ v.start = "String";
+ v.len = 6;
+ CU_ASSERT(spdk_json_decode_uint64(&v, &i) != 0);
+
+ /* invalid value (float) */
+ v.type = SPDK_JSON_VAL_NUMBER;
+ v.start = "123.45";
+ v.len = 6;
+ CU_ASSERT(spdk_json_decode_uint64(&v, &i) != 0);
+
+ /* edge case (0) */
+ v.start = "0";
+ v.len = 1;
+ i = 456;
+ CU_ASSERT(spdk_json_decode_uint64(&v, &i) == 0);
+ CU_ASSERT(i == 0);
+
+ /* invalid value (negative) */
+ v.start = "-1";
+ v.len = 2;
+ CU_ASSERT(spdk_json_decode_uint64(&v, &i) != 0);
+
+ /* edge case (maximum) */
+ v.start = "18446744073709551615";
+ v.len = 20;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint64(&v, &i) == 0);
+ CU_ASSERT(i == 18446744073709551615U);
+
+ /* invalid value (overflow) */
+ v.start = "18446744073709551616";
+ v.len = 20;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint64(&v, &i) != 0);
+
+ /* valid exponent */
+ v.start = "42E2";
+ v.len = 4;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint64(&v, &i) == 0);
+ CU_ASSERT(i == 4200);
+
+ /* invalid exponent (overflow) */
+ v.start = "42e64";
+ v.len = 5;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint64(&v, &i) != 0);
+
+ /* invalid exponent (decimal) */
+ v.start = "42.323E2";
+ v.len = 8;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint64(&v, &i) != 0);
+
+ /* valid exponent with decimal */
+ v.start = "42.32E2";
+ v.len = 7;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint64(&v, &i) == 0);
+ CU_ASSERT(i == 4232);
+
+ /* invalid negative exponent */
+ v.start = "400e-4";
+ v.len = 6;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint64(&v, &i) != 0);
+
+ /* invalid negative exponent */
+ v.start = "-400e-2";
+ v.len = 7;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint64(&v, &i) != 0);
+
+ /* valid negative exponent */
+ v.start = "400e-2";
+ v.len = 6;
+ i = 0;
+ CU_ASSERT(spdk_json_decode_uint64(&v, &i) == 0);
+ CU_ASSERT(i == 4)
+}
+
+static void
+test_decode_string(void)
+{
+ struct spdk_json_val v;
+ char *value = NULL;
+
+ /* Passing Test: Standard */
+ v.type = SPDK_JSON_VAL_STRING;
+ v.start = "HELLO";
+ v.len = 5;
+ CU_ASSERT(spdk_json_decode_string(&v, &value) == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(memcmp(value, v.start, 6) == 0);
+
+ /* Edge Test: Empty String */
+ v.start = "";
+ v.len = 0;
+ CU_ASSERT(spdk_json_decode_string(&v, &value) == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(memcmp(value, v.start, 1) == 0);
+
+ /*
+ * Failing Test: Null Terminator In String
+ * It is valid for a json string to contain \u0000 and the parser will accept it.
+ * However, a null terminated C string cannot contain '\0' and should be rejected
+ * if that character is found before the end of the string.
+ */
+ v.start = "HELO";
+ v.len = 5;
+ CU_ASSERT(spdk_json_decode_string(&v, &value) != 0);
+
+ /* Failing Test: Wrong Type */
+ v.start = "45673";
+ v.type = SPDK_JSON_VAL_NUMBER;
+ CU_ASSERT(spdk_json_decode_string(&v, &value) != 0);
+
+ /* Passing Test: Special Characters */
+ v.type = SPDK_JSON_VAL_STRING;
+ v.start = "HE\bLL\tO\\WORLD";
+ v.len = 13;
+ CU_ASSERT(spdk_json_decode_string(&v, &value) == 0);
+ SPDK_CU_ASSERT_FATAL(value != NULL);
+ CU_ASSERT(memcmp(value, v.start, 14) == 0);
+
+ free(value);
+}
+
+char ut_json_text[] =
+ "{"
+ " \"string\": \"Some string data\","
+ " \"object\": { "
+ " \"another_string\": \"Yet anoter string data\","
+ " \"array name with space\": [1, [], {} ]"
+ " },"
+ " \"array\": [ \"Text\", 2, {} ]"
+ "}"
+ ;
+
+static void
+test_find(void)
+{
+ struct spdk_json_val *values, *key, *val, *key2, *val2;
+ ssize_t values_cnt;
+ ssize_t rc;
+
+ values_cnt = spdk_json_parse(ut_json_text, strlen(ut_json_text), NULL, 0, NULL, 0);
+ SPDK_CU_ASSERT_FATAL(values_cnt > 0);
+
+ values = calloc(values_cnt, sizeof(struct spdk_json_val));
+ SPDK_CU_ASSERT_FATAL(values != NULL);
+
+ rc = spdk_json_parse(ut_json_text, strlen(ut_json_text), values, values_cnt, NULL, 0);
+ SPDK_CU_ASSERT_FATAL(values_cnt == rc);
+
+ key = val = NULL;
+ rc = spdk_json_find(values, "string", &key, &val, SPDK_JSON_VAL_STRING);
+ CU_ASSERT(rc == 0);
+
+ CU_ASSERT(key != NULL && spdk_json_strequal(key, "string") == true);
+ CU_ASSERT(val != NULL && spdk_json_strequal(val, "Some string data") == true)
+
+ key = val = NULL;
+ rc = spdk_json_find(values, "object", &key, &val, SPDK_JSON_VAL_OBJECT_BEGIN);
+ CU_ASSERT(rc == 0);
+
+ CU_ASSERT(key != NULL && spdk_json_strequal(key, "object") == true);
+
+ /* Find key in "object" by passing SPDK_JSON_VAL_ANY to match any type */
+ key2 = val2 = NULL;
+ rc = spdk_json_find(val, "array name with space", &key2, &val2, SPDK_JSON_VAL_ANY);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(key2 != NULL && spdk_json_strequal(key2, "array name with space") == true);
+ CU_ASSERT(val2 != NULL && val2->type == SPDK_JSON_VAL_ARRAY_BEGIN);
+
+ /* Find the "array" key in "object" by passing SPDK_JSON_VAL_ARRAY_BEGIN to match only array */
+ key2 = val2 = NULL;
+ rc = spdk_json_find(val, "array name with space", &key2, &val2, SPDK_JSON_VAL_ARRAY_BEGIN);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(key2 != NULL && spdk_json_strequal(key2, "array name with space") == true);
+ CU_ASSERT(val2 != NULL && val2->type == SPDK_JSON_VAL_ARRAY_BEGIN);
+
+ /* Negative test - key doesn't exist */
+ key2 = val2 = NULL;
+ rc = spdk_json_find(val, "this_key_does_not_exist", &key2, &val2, SPDK_JSON_VAL_ANY);
+ CU_ASSERT(rc == -ENOENT);
+
+ /* Negative test - key type doesn't match */
+ key2 = val2 = NULL;
+ rc = spdk_json_find(val, "another_string", &key2, &val2, SPDK_JSON_VAL_ARRAY_BEGIN);
+ CU_ASSERT(rc == -EDOM);
+
+ free(values);
+}
+
+static void
+test_iterating(void)
+{
+ struct spdk_json_val *values;
+ struct spdk_json_val *string_key;
+ struct spdk_json_val *object_key, *object_val;
+ struct spdk_json_val *array_key, *array_val;
+ struct spdk_json_val *another_string_key;
+ struct spdk_json_val *array_name_with_space_key, *array_name_with_space_val;
+ struct spdk_json_val *it;
+ ssize_t values_cnt;
+ ssize_t rc;
+
+ values_cnt = spdk_json_parse(ut_json_text, strlen(ut_json_text), NULL, 0, NULL, 0);
+ SPDK_CU_ASSERT_FATAL(values_cnt > 0);
+
+ values = calloc(values_cnt, sizeof(struct spdk_json_val));
+ SPDK_CU_ASSERT_FATAL(values != NULL);
+
+ rc = spdk_json_parse(ut_json_text, strlen(ut_json_text), values, values_cnt, NULL, 0);
+ SPDK_CU_ASSERT_FATAL(values_cnt == rc);
+
+ /* Iterate over object keys. JSON spec doesn't guarantee order of keys in object but
+ * SPDK implementation implicitly does.
+ */
+ string_key = spdk_json_object_first(values);
+ CU_ASSERT(spdk_json_strequal(string_key, "string") == true);
+
+ object_key = spdk_json_next(string_key);
+ object_val = spdk_json_value(object_key);
+ CU_ASSERT(spdk_json_strequal(object_key, "object") == true);
+
+ array_key = spdk_json_next(object_key);
+ array_val = spdk_json_value(array_key);
+ CU_ASSERT(spdk_json_strequal(array_key, "array") == true);
+
+ /* NULL '}' */
+ CU_ASSERT(spdk_json_next(array_key) == NULL);
+
+ /* Iterate over subobjects */
+ another_string_key = spdk_json_object_first(object_val);
+ CU_ASSERT(spdk_json_strequal(another_string_key, "another_string") == true);
+
+ array_name_with_space_key = spdk_json_next(another_string_key);
+ array_name_with_space_val = spdk_json_value(array_name_with_space_key);
+ CU_ASSERT(spdk_json_strequal(array_name_with_space_key, "array name with space") == true);
+
+ CU_ASSERT(spdk_json_next(array_name_with_space_key) == NULL);
+
+ /* Iterate over array in subobject */
+ it = spdk_json_array_first(array_name_with_space_val);
+ SPDK_CU_ASSERT_FATAL(it != NULL);
+ CU_ASSERT(it->type == SPDK_JSON_VAL_NUMBER);
+
+ it = spdk_json_next(it);
+ SPDK_CU_ASSERT_FATAL(it != NULL);
+ CU_ASSERT(it->type == SPDK_JSON_VAL_ARRAY_BEGIN);
+
+ it = spdk_json_next(it);
+ SPDK_CU_ASSERT_FATAL(it != NULL);
+ CU_ASSERT(it->type == SPDK_JSON_VAL_OBJECT_BEGIN);
+
+ it = spdk_json_next(it);
+ CU_ASSERT(it == NULL);
+
+ /* Iterate over array in root object */
+ it = spdk_json_array_first(array_val);
+ SPDK_CU_ASSERT_FATAL(it != NULL);
+ CU_ASSERT(it->type == SPDK_JSON_VAL_STRING);
+
+ it = spdk_json_next(it);
+ SPDK_CU_ASSERT_FATAL(it != NULL);
+ CU_ASSERT(it->type == SPDK_JSON_VAL_NUMBER);
+
+ it = spdk_json_next(it);
+ SPDK_CU_ASSERT_FATAL(it != NULL);
+ CU_ASSERT(it->type == SPDK_JSON_VAL_OBJECT_BEGIN);
+
+ /* Array end */
+ it = spdk_json_next(it);
+ CU_ASSERT(it == NULL);
+
+ free(values);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("json", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "strequal", test_strequal) == NULL ||
+ CU_add_test(suite, "num_to_uint16", test_num_to_uint16) == NULL ||
+ CU_add_test(suite, "num_to_int32", test_num_to_int32) == NULL ||
+ CU_add_test(suite, "num_to_uint64", test_num_to_uint64) == NULL ||
+ CU_add_test(suite, "decode_object", test_decode_object) == NULL ||
+ CU_add_test(suite, "decode_array", test_decode_array) == NULL ||
+ CU_add_test(suite, "decode_bool", test_decode_bool) == NULL ||
+ CU_add_test(suite, "decode_uint16", test_decode_uint16) == NULL ||
+ CU_add_test(suite, "decode_int32", test_decode_int32) == NULL ||
+ CU_add_test(suite, "decode_uint32", test_decode_uint32) == NULL ||
+ CU_add_test(suite, "decode_uint64", test_decode_uint64) == NULL ||
+ CU_add_test(suite, "decode_string", test_decode_string) == NULL ||
+ CU_add_test(suite, "find_object", test_find) == NULL ||
+ CU_add_test(suite, "iterating", test_iterating) == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+
+ CU_basic_run_tests();
+
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/json/json_write.c/.gitignore b/src/spdk/test/unit/lib/json/json_write.c/.gitignore
new file mode 100644
index 00000000..dd576b23
--- /dev/null
+++ b/src/spdk/test/unit/lib/json/json_write.c/.gitignore
@@ -0,0 +1 @@
+json_write_ut
diff --git a/src/spdk/test/unit/lib/json/json_write.c/Makefile b/src/spdk/test/unit/lib/json/json_write.c/Makefile
new file mode 100644
index 00000000..9fe1fa91
--- /dev/null
+++ b/src/spdk/test/unit/lib/json/json_write.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = json_write_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/json/json_write.c/json_write_ut.c b/src/spdk/test/unit/lib/json/json_write.c/json_write_ut.c
new file mode 100644
index 00000000..70c62fe1
--- /dev/null
+++ b/src/spdk/test/unit/lib/json/json_write.c/json_write_ut.c
@@ -0,0 +1,745 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "json/json_write.c"
+#include "json/json_parse.c"
+
+#include "spdk/util.h"
+
+static uint8_t g_buf[1000];
+static uint8_t *g_write_pos;
+
+static int
+write_cb(void *cb_ctx, const void *data, size_t size)
+{
+ size_t buf_free = g_buf + sizeof(g_buf) - g_write_pos;
+
+ if (size > buf_free) {
+ return -1;
+ }
+
+ memcpy(g_write_pos, data, size);
+ g_write_pos += size;
+
+ return 0;
+}
+
+#define BEGIN() \
+ memset(g_buf, 0, sizeof(g_buf)); \
+ g_write_pos = g_buf; \
+ w = spdk_json_write_begin(write_cb, NULL, 0); \
+ SPDK_CU_ASSERT_FATAL(w != NULL)
+
+#define END(json) \
+ CU_ASSERT(spdk_json_write_end(w) == 0); \
+ CU_ASSERT(g_write_pos - g_buf == sizeof(json) - 1); \
+ CU_ASSERT(memcmp(json, g_buf, sizeof(json) - 1) == 0)
+
+#define END_NOCMP() \
+ CU_ASSERT(spdk_json_write_end(w) == 0)
+
+#define END_FAIL() \
+ CU_ASSERT(spdk_json_write_end(w) < 0)
+
+#define VAL_STRING(str) \
+ CU_ASSERT(spdk_json_write_string_raw(w, str, sizeof(str) - 1) == 0)
+
+#define VAL_STRING_FAIL(str) \
+ CU_ASSERT(spdk_json_write_string_raw(w, str, sizeof(str) - 1) < 0)
+
+#define STR_PASS(in, out) \
+ BEGIN(); VAL_STRING(in); END("\"" out "\"")
+
+#define STR_FAIL(in) \
+ BEGIN(); VAL_STRING_FAIL(in); END_FAIL()
+
+#define VAL_STRING_UTF16LE(str) \
+ CU_ASSERT(spdk_json_write_string_utf16le_raw(w, (const uint16_t *)str, sizeof(str) / sizeof(uint16_t) - 1) == 0)
+
+#define VAL_STRING_UTF16LE_FAIL(str) \
+ CU_ASSERT(spdk_json_write_string_utf16le_raw(w, (const uint16_t *)str, sizeof(str) / sizeof(uint16_t) - 1) < 0)
+
+#define STR_UTF16LE_PASS(in, out) \
+ BEGIN(); VAL_STRING_UTF16LE(in); END("\"" out "\"")
+
+#define STR_UTF16LE_FAIL(in) \
+ BEGIN(); VAL_STRING_UTF16LE_FAIL(in); END_FAIL()
+
+#define VAL_NAME(name) \
+ CU_ASSERT(spdk_json_write_name_raw(w, name, sizeof(name) - 1) == 0)
+
+#define VAL_NULL() CU_ASSERT(spdk_json_write_null(w) == 0)
+#define VAL_TRUE() CU_ASSERT(spdk_json_write_bool(w, true) == 0)
+#define VAL_FALSE() CU_ASSERT(spdk_json_write_bool(w, false) == 0)
+
+#define VAL_INT32(i) CU_ASSERT(spdk_json_write_int32(w, i) == 0);
+#define VAL_UINT32(u) CU_ASSERT(spdk_json_write_uint32(w, u) == 0);
+
+#define VAL_INT64(i) CU_ASSERT(spdk_json_write_int64(w, i) == 0);
+#define VAL_UINT64(u) CU_ASSERT(spdk_json_write_uint64(w, u) == 0);
+
+#define VAL_ARRAY_BEGIN() CU_ASSERT(spdk_json_write_array_begin(w) == 0)
+#define VAL_ARRAY_END() CU_ASSERT(spdk_json_write_array_end(w) == 0)
+
+#define VAL_OBJECT_BEGIN() CU_ASSERT(spdk_json_write_object_begin(w) == 0)
+#define VAL_OBJECT_END() CU_ASSERT(spdk_json_write_object_end(w) == 0)
+
+#define VAL(v) CU_ASSERT(spdk_json_write_val(w, v) == 0)
+
+static void
+test_write_literal(void)
+{
+ struct spdk_json_write_ctx *w;
+
+ BEGIN();
+ VAL_NULL();
+ END("null");
+
+ BEGIN();
+ VAL_TRUE();
+ END("true");
+
+ BEGIN();
+ VAL_FALSE();
+ END("false");
+}
+
+static void
+test_write_string_simple(void)
+{
+ struct spdk_json_write_ctx *w;
+
+ STR_PASS("hello world", "hello world");
+ STR_PASS(" ", " ");
+ STR_PASS("~", "~");
+}
+
+static void
+test_write_string_escapes(void)
+{
+ struct spdk_json_write_ctx *w;
+
+ /* Two-character escapes */
+ STR_PASS("\b", "\\b");
+ STR_PASS("\f", "\\f");
+ STR_PASS("\n", "\\n");
+ STR_PASS("\r", "\\r");
+ STR_PASS("\t", "\\t");
+ STR_PASS("\"", "\\\"");
+ STR_PASS("\\", "\\\\");
+
+ /* JSON defines an escape for forward slash, but it is optional */
+ STR_PASS("/", "/");
+
+ STR_PASS("hello\nworld", "hello\\nworld");
+
+ STR_PASS("\x00", "\\u0000");
+ STR_PASS("\x01", "\\u0001");
+ STR_PASS("\x02", "\\u0002");
+
+ STR_PASS("\xC3\xB6", "\\u00F6");
+ STR_PASS("\xE2\x88\x9A", "\\u221A");
+ STR_PASS("\xEA\xAA\xAA", "\\uAAAA");
+
+ /* Surrogate pairs */
+ STR_PASS("\xF0\x9D\x84\x9E", "\\uD834\\uDD1E");
+ STR_PASS("\xF0\xA0\x9C\x8E", "\\uD841\\uDF0E");
+
+ /* Examples from RFC 3629 */
+ STR_PASS("\x41\xE2\x89\xA2\xCE\x91\x2E", "A\\u2262\\u0391.");
+ STR_PASS("\xED\x95\x9C\xEA\xB5\xAD\xEC\x96\xB4", "\\uD55C\\uAD6D\\uC5B4");
+ STR_PASS("\xE6\x97\xA5\xE6\x9C\xAC\xE8\xAA\x9E", "\\u65E5\\u672C\\u8A9E");
+ STR_PASS("\xEF\xBB\xBF\xF0\xA3\x8E\xB4", "\\uFEFF\\uD84C\\uDFB4");
+
+ /* UTF-8 edge cases */
+ STR_PASS("\x7F", "\\u007F");
+ STR_FAIL("\x80");
+ STR_FAIL("\xC1");
+ STR_FAIL("\xC2");
+ STR_PASS("\xC2\x80", "\\u0080");
+ STR_PASS("\xC2\xBF", "\\u00BF");
+ STR_PASS("\xDF\x80", "\\u07C0");
+ STR_PASS("\xDF\xBF", "\\u07FF");
+ STR_FAIL("\xDF");
+ STR_FAIL("\xE0\x80");
+ STR_FAIL("\xE0\x1F");
+ STR_FAIL("\xE0\x1F\x80");
+ STR_FAIL("\xE0");
+ STR_FAIL("\xE0\xA0");
+ STR_PASS("\xE0\xA0\x80", "\\u0800");
+ STR_PASS("\xE0\xA0\xBF", "\\u083F");
+ STR_FAIL("\xE0\xA0\xC0");
+ STR_PASS("\xE0\xBF\x80", "\\u0FC0");
+ STR_PASS("\xE0\xBF\xBF", "\\u0FFF");
+ STR_FAIL("\xE0\xC0\x80");
+ STR_FAIL("\xE1");
+ STR_FAIL("\xE1\x80");
+ STR_FAIL("\xE1\x7F\x80");
+ STR_FAIL("\xE1\x80\x7F");
+ STR_PASS("\xE1\x80\x80", "\\u1000");
+ STR_PASS("\xE1\x80\xBF", "\\u103F");
+ STR_PASS("\xE1\xBF\x80", "\\u1FC0");
+ STR_PASS("\xE1\xBF\xBF", "\\u1FFF");
+ STR_FAIL("\xE1\xC0\x80");
+ STR_FAIL("\xE1\x80\xC0");
+ STR_PASS("\xEF\x80\x80", "\\uF000");
+ STR_PASS("\xEF\xBF\xBF", "\\uFFFF");
+ STR_FAIL("\xF0");
+ STR_FAIL("\xF0\x90");
+ STR_FAIL("\xF0\x90\x80");
+ STR_FAIL("\xF0\x80\x80\x80");
+ STR_FAIL("\xF0\x8F\x80\x80");
+ STR_PASS("\xF0\x90\x80\x80", "\\uD800\\uDC00");
+ STR_PASS("\xF0\x90\x80\xBF", "\\uD800\\uDC3F");
+ STR_PASS("\xF0\x90\xBF\x80", "\\uD803\\uDFC0");
+ STR_PASS("\xF0\xBF\x80\x80", "\\uD8BC\\uDC00");
+ STR_FAIL("\xF0\xC0\x80\x80");
+ STR_FAIL("\xF1");
+ STR_FAIL("\xF1\x80");
+ STR_FAIL("\xF1\x80\x80");
+ STR_FAIL("\xF1\x80\x80\x7F");
+ STR_PASS("\xF1\x80\x80\x80", "\\uD8C0\\uDC00");
+ STR_PASS("\xF1\x80\x80\xBF", "\\uD8C0\\uDC3F");
+ STR_PASS("\xF1\x80\xBF\x80", "\\uD8C3\\uDFC0");
+ STR_PASS("\xF1\xBF\x80\x80", "\\uD9BC\\uDC00");
+ STR_PASS("\xF3\x80\x80\x80", "\\uDAC0\\uDC00");
+ STR_FAIL("\xF3\xC0\x80\x80");
+ STR_FAIL("\xF3\x80\xC0\x80");
+ STR_FAIL("\xF3\x80\x80\xC0");
+ STR_FAIL("\xF4");
+ STR_FAIL("\xF4\x80");
+ STR_FAIL("\xF4\x80\x80");
+ STR_PASS("\xF4\x80\x80\x80", "\\uDBC0\\uDC00");
+ STR_PASS("\xF4\x8F\x80\x80", "\\uDBFC\\uDC00");
+ STR_PASS("\xF4\x8F\xBF\xBF", "\\uDBFF\\uDFFF");
+ STR_FAIL("\xF4\x90\x80\x80");
+ STR_FAIL("\xF5");
+ STR_FAIL("\xF5\x80");
+ STR_FAIL("\xF5\x80\x80");
+ STR_FAIL("\xF5\x80\x80\x80");
+ STR_FAIL("\xF5\x80\x80\x80\x80");
+
+ /* Overlong encodings */
+ STR_FAIL("\xC0\x80");
+
+ /* Surrogate pairs */
+ STR_FAIL("\xED\xA0\x80"); /* U+D800 First high surrogate */
+ STR_FAIL("\xED\xAF\xBF"); /* U+DBFF Last high surrogate */
+ STR_FAIL("\xED\xB0\x80"); /* U+DC00 First low surrogate */
+ STR_FAIL("\xED\xBF\xBF"); /* U+DFFF Last low surrogate */
+ STR_FAIL("\xED\xA1\x8C\xED\xBE\xB4"); /* U+233B4 (invalid surrogate pair encoding) */
+}
+
+static void
+test_write_string_utf16le(void)
+{
+ struct spdk_json_write_ctx *w;
+
+ /* All characters in BMP */
+ STR_UTF16LE_PASS(((uint8_t[]) {
+ 'H', 0, 'e', 0, 'l', 0, 'l', 0, 'o', 0, 0x15, 0xFE, 0, 0
+ }), "Hello\\uFE15");
+
+ /* Surrogate pair */
+ STR_UTF16LE_PASS(((uint8_t[]) {
+ 'H', 0, 'i', 0, 0x34, 0xD8, 0x1E, 0xDD, '!', 0, 0, 0
+ }), "Hi\\uD834\\uDD1E!");
+
+ /* Valid high surrogate, but no low surrogate */
+ STR_UTF16LE_FAIL(((uint8_t[]) {
+ 0x00, 0xD8, 0, 0 /* U+D800 */
+ }));
+
+ /* Invalid leading low surrogate */
+ STR_UTF16LE_FAIL(((uint8_t[]) {
+ 0x00, 0xDC, 0x00, 0xDC, 0, 0 /* U+DC00 U+DC00 */
+ }));
+
+ /* Valid high surrogate followed by another high surrogate (invalid) */
+ STR_UTF16LE_FAIL(((uint8_t[]) {
+ 0x00, 0xD8, 0x00, 0xD8, 0, 0 /* U+D800 U+D800 */
+ }));
+}
+
+static void
+test_write_number_int32(void)
+{
+ struct spdk_json_write_ctx *w;
+
+ BEGIN();
+ VAL_INT32(0);
+ END("0");
+
+ BEGIN();
+ VAL_INT32(1);
+ END("1");
+
+ BEGIN();
+ VAL_INT32(123);
+ END("123");
+
+ BEGIN();
+ VAL_INT32(-123);
+ END("-123");
+
+ BEGIN();
+ VAL_INT32(2147483647);
+ END("2147483647");
+
+ BEGIN();
+ VAL_INT32(-2147483648);
+ END("-2147483648");
+}
+
+static void
+test_write_number_uint32(void)
+{
+ struct spdk_json_write_ctx *w;
+
+ BEGIN();
+ VAL_UINT32(0);
+ END("0");
+
+ BEGIN();
+ VAL_UINT32(1);
+ END("1");
+
+ BEGIN();
+ VAL_UINT32(123);
+ END("123");
+
+ BEGIN();
+ VAL_UINT32(2147483647);
+ END("2147483647");
+
+ BEGIN();
+ VAL_UINT32(4294967295);
+ END("4294967295");
+}
+
+static void
+test_write_number_int64(void)
+{
+ struct spdk_json_write_ctx *w;
+
+ BEGIN();
+ VAL_INT64(0);
+ END("0");
+
+ BEGIN();
+ VAL_INT64(1);
+ END("1");
+
+ BEGIN();
+ VAL_INT64(123);
+ END("123");
+
+ BEGIN();
+ VAL_INT64(-123);
+ END("-123");
+
+ BEGIN();
+ VAL_INT64(INT64_MAX);
+ END("9223372036854775807");
+
+ BEGIN();
+ VAL_INT64(INT64_MIN);
+ END("-9223372036854775808");
+}
+
+static void
+test_write_number_uint64(void)
+{
+ struct spdk_json_write_ctx *w;
+
+ BEGIN();
+ VAL_UINT64(0);
+ END("0");
+
+ BEGIN();
+ VAL_UINT64(1);
+ END("1");
+
+ BEGIN();
+ VAL_UINT64(123);
+ END("123");
+
+ BEGIN();
+ VAL_UINT64(INT64_MAX);
+ END("9223372036854775807");
+
+ BEGIN();
+ VAL_UINT64(UINT64_MAX);
+ END("18446744073709551615");
+}
+
+static void
+test_write_array(void)
+{
+ struct spdk_json_write_ctx *w;
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_ARRAY_END();
+ END("[]");
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_INT32(0);
+ VAL_ARRAY_END();
+ END("[0]");
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_INT32(0);
+ VAL_INT32(1);
+ VAL_ARRAY_END();
+ END("[0,1]");
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_INT32(0);
+ VAL_INT32(1);
+ VAL_INT32(2);
+ VAL_ARRAY_END();
+ END("[0,1,2]");
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_STRING("a");
+ VAL_ARRAY_END();
+ END("[\"a\"]");
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_STRING("a");
+ VAL_STRING("b");
+ VAL_ARRAY_END();
+ END("[\"a\",\"b\"]");
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_STRING("a");
+ VAL_STRING("b");
+ VAL_STRING("c");
+ VAL_ARRAY_END();
+ END("[\"a\",\"b\",\"c\"]");
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_TRUE();
+ VAL_ARRAY_END();
+ END("[true]");
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_TRUE();
+ VAL_FALSE();
+ VAL_ARRAY_END();
+ END("[true,false]");
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_TRUE();
+ VAL_FALSE();
+ VAL_TRUE();
+ VAL_ARRAY_END();
+ END("[true,false,true]");
+}
+
+static void
+test_write_object(void)
+{
+ struct spdk_json_write_ctx *w;
+
+ BEGIN();
+ VAL_OBJECT_BEGIN();
+ VAL_OBJECT_END();
+ END("{}");
+
+ BEGIN();
+ VAL_OBJECT_BEGIN();
+ VAL_NAME("a");
+ VAL_INT32(0);
+ VAL_OBJECT_END();
+ END("{\"a\":0}");
+
+ BEGIN();
+ VAL_OBJECT_BEGIN();
+ VAL_NAME("a");
+ VAL_INT32(0);
+ VAL_NAME("b");
+ VAL_INT32(1);
+ VAL_OBJECT_END();
+ END("{\"a\":0,\"b\":1}");
+
+ BEGIN();
+ VAL_OBJECT_BEGIN();
+ VAL_NAME("a");
+ VAL_INT32(0);
+ VAL_NAME("b");
+ VAL_INT32(1);
+ VAL_NAME("c");
+ VAL_INT32(2);
+ VAL_OBJECT_END();
+ END("{\"a\":0,\"b\":1,\"c\":2}");
+}
+
+static void
+test_write_nesting(void)
+{
+ struct spdk_json_write_ctx *w;
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_ARRAY_END();
+ VAL_ARRAY_END();
+ END("[[]]");
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_ARRAY_END();
+ VAL_ARRAY_END();
+ VAL_ARRAY_END();
+ END("[[[]]]");
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_INT32(0);
+ VAL_ARRAY_BEGIN();
+ VAL_ARRAY_END();
+ VAL_ARRAY_END();
+ END("[0,[]]");
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_ARRAY_END();
+ VAL_INT32(0);
+ VAL_ARRAY_END();
+ END("[[],0]");
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_INT32(0);
+ VAL_ARRAY_BEGIN();
+ VAL_INT32(1);
+ VAL_ARRAY_END();
+ VAL_INT32(2);
+ VAL_ARRAY_END();
+ END("[0,[1],2]");
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_INT32(0);
+ VAL_INT32(1);
+ VAL_ARRAY_BEGIN();
+ VAL_INT32(2);
+ VAL_INT32(3);
+ VAL_ARRAY_END();
+ VAL_INT32(4);
+ VAL_INT32(5);
+ VAL_ARRAY_END();
+ END("[0,1,[2,3],4,5]");
+
+ BEGIN();
+ VAL_OBJECT_BEGIN();
+ VAL_NAME("a");
+ VAL_OBJECT_BEGIN();
+ VAL_OBJECT_END();
+ VAL_OBJECT_END();
+ END("{\"a\":{}}");
+
+ BEGIN();
+ VAL_OBJECT_BEGIN();
+ VAL_NAME("a");
+ VAL_OBJECT_BEGIN();
+ VAL_NAME("b");
+ VAL_INT32(0);
+ VAL_OBJECT_END();
+ VAL_OBJECT_END();
+ END("{\"a\":{\"b\":0}}");
+
+ BEGIN();
+ VAL_OBJECT_BEGIN();
+ VAL_NAME("a");
+ VAL_ARRAY_BEGIN();
+ VAL_INT32(0);
+ VAL_ARRAY_END();
+ VAL_OBJECT_END();
+ END("{\"a\":[0]}");
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_OBJECT_BEGIN();
+ VAL_NAME("a");
+ VAL_INT32(0);
+ VAL_OBJECT_END();
+ VAL_ARRAY_END();
+ END("[{\"a\":0}]");
+
+ BEGIN();
+ VAL_ARRAY_BEGIN();
+ VAL_OBJECT_BEGIN();
+ VAL_NAME("a");
+ VAL_OBJECT_BEGIN();
+ VAL_NAME("b");
+ VAL_ARRAY_BEGIN();
+ VAL_OBJECT_BEGIN();
+ VAL_NAME("c");
+ VAL_INT32(1);
+ VAL_OBJECT_END();
+ VAL_INT32(2);
+ VAL_ARRAY_END();
+ VAL_NAME("d");
+ VAL_INT32(3);
+ VAL_OBJECT_END();
+ VAL_NAME("e");
+ VAL_INT32(4);
+ VAL_OBJECT_END();
+ VAL_INT32(5);
+ VAL_ARRAY_END();
+ END("[{\"a\":{\"b\":[{\"c\":1},2],\"d\":3},\"e\":4},5]");
+
+ /* Examples from RFC 7159 */
+ BEGIN();
+ VAL_OBJECT_BEGIN();
+ VAL_NAME("Image");
+ VAL_OBJECT_BEGIN();
+ VAL_NAME("Width");
+ VAL_INT32(800);
+ VAL_NAME("Height");
+ VAL_INT32(600);
+ VAL_NAME("Title");
+ VAL_STRING("View from 15th Floor");
+ VAL_NAME("Thumbnail");
+ VAL_OBJECT_BEGIN();
+ VAL_NAME("Url");
+ VAL_STRING("http://www.example.com/image/481989943");
+ VAL_NAME("Height");
+ VAL_INT32(125);
+ VAL_NAME("Width");
+ VAL_INT32(100);
+ VAL_OBJECT_END();
+ VAL_NAME("Animated");
+ VAL_FALSE();
+ VAL_NAME("IDs");
+ VAL_ARRAY_BEGIN();
+ VAL_INT32(116);
+ VAL_INT32(943);
+ VAL_INT32(234);
+ VAL_INT32(38793);
+ VAL_ARRAY_END();
+ VAL_OBJECT_END();
+ VAL_OBJECT_END();
+ END(
+ "{\"Image\":"
+ "{"
+ "\"Width\":800,"
+ "\"Height\":600,"
+ "\"Title\":\"View from 15th Floor\","
+ "\"Thumbnail\":{"
+ "\"Url\":\"http://www.example.com/image/481989943\","
+ "\"Height\":125,"
+ "\"Width\":100"
+ "},"
+ "\"Animated\":false,"
+ "\"IDs\":[116,943,234,38793]"
+ "}"
+ "}");
+}
+
+/* Round-trip parse and write test */
+static void
+test_write_val(void)
+{
+ struct spdk_json_write_ctx *w;
+ struct spdk_json_val values[100];
+ char src[] = "{\"a\":[1,2,3],\"b\":{\"c\":\"d\"},\"e\":true,\"f\":false,\"g\":null}";
+
+ CU_ASSERT(spdk_json_parse(src, strlen(src), values, SPDK_COUNTOF(values), NULL,
+ SPDK_JSON_PARSE_FLAG_DECODE_IN_PLACE) == 19);
+
+ BEGIN();
+ VAL(values);
+ END("{\"a\":[1,2,3],\"b\":{\"c\":\"d\"},\"e\":true,\"f\":false,\"g\":null}");
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("json", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "write_literal", test_write_literal) == NULL ||
+ CU_add_test(suite, "write_string_simple", test_write_string_simple) == NULL ||
+ CU_add_test(suite, "write_string_escapes", test_write_string_escapes) == NULL ||
+ CU_add_test(suite, "write_string_utf16le", test_write_string_utf16le) == NULL ||
+ CU_add_test(suite, "write_number_int32", test_write_number_int32) == NULL ||
+ CU_add_test(suite, "write_number_uint32", test_write_number_uint32) == NULL ||
+ CU_add_test(suite, "write_number_int64", test_write_number_int64) == NULL ||
+ CU_add_test(suite, "write_number_uint64", test_write_number_uint64) == NULL ||
+ CU_add_test(suite, "write_array", test_write_array) == NULL ||
+ CU_add_test(suite, "write_object", test_write_object) == NULL ||
+ CU_add_test(suite, "write_nesting", test_write_nesting) == NULL ||
+ CU_add_test(suite, "write_val", test_write_val) == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+
+ CU_basic_run_tests();
+
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/json_mock.c b/src/spdk/test/unit/lib/json_mock.c
new file mode 100644
index 00000000..b9cee171
--- /dev/null
+++ b/src/spdk/test/unit/lib/json_mock.c
@@ -0,0 +1,81 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/json.h"
+#include "spdk_internal/mock.h"
+
+DEFINE_STUB(spdk_json_write_begin, struct spdk_json_write_ctx *, (spdk_json_write_cb write_cb,
+ void *cb_ctx, uint32_t flags), NULL);
+
+DEFINE_STUB(spdk_json_write_end, int, (struct spdk_json_write_ctx *w), 0);
+DEFINE_STUB(spdk_json_write_null, int, (struct spdk_json_write_ctx *w), 0);
+DEFINE_STUB(spdk_json_write_bool, int, (struct spdk_json_write_ctx *w, bool val), 0);
+DEFINE_STUB(spdk_json_write_int32, int, (struct spdk_json_write_ctx *w, int32_t val), 0);
+DEFINE_STUB(spdk_json_write_uint32, int, (struct spdk_json_write_ctx *w, uint32_t val), 0);
+DEFINE_STUB(spdk_json_write_int64, int, (struct spdk_json_write_ctx *w, int64_t val), 0);
+DEFINE_STUB(spdk_json_write_uint64, int, (struct spdk_json_write_ctx *w, uint64_t val), 0);
+DEFINE_STUB(spdk_json_write_string, int, (struct spdk_json_write_ctx *w, const char *val), 0);
+DEFINE_STUB(spdk_json_write_string_raw, int, (struct spdk_json_write_ctx *w, const char *val,
+ size_t len), 0);
+
+DEFINE_STUB(spdk_json_write_array_begin, int, (struct spdk_json_write_ctx *w), 0);
+DEFINE_STUB(spdk_json_write_array_end, int, (struct spdk_json_write_ctx *w), 0);
+DEFINE_STUB(spdk_json_write_object_begin, int, (struct spdk_json_write_ctx *w), 0);
+DEFINE_STUB(spdk_json_write_object_end, int, (struct spdk_json_write_ctx *w), 0);
+DEFINE_STUB(spdk_json_write_name, int, (struct spdk_json_write_ctx *w, const char *name), 0);
+DEFINE_STUB(spdk_json_write_name_raw, int, (struct spdk_json_write_ctx *w, const char *name,
+ size_t len), 0);
+
+/* Utility functions */
+DEFINE_STUB(spdk_json_write_named_null, int, (struct spdk_json_write_ctx *w, const char *name), 0);
+DEFINE_STUB(spdk_json_write_named_bool, int, (struct spdk_json_write_ctx *w, const char *name,
+ bool val), 0);
+DEFINE_STUB(spdk_json_write_named_int32, int, (struct spdk_json_write_ctx *w, const char *name,
+ int32_t val), 0);
+DEFINE_STUB(spdk_json_write_named_uint32, int, (struct spdk_json_write_ctx *w, const char *name,
+ uint32_t val), 0);
+DEFINE_STUB(spdk_json_write_named_uint64, int, (struct spdk_json_write_ctx *w, const char *name,
+ uint64_t val), 0);
+DEFINE_STUB(spdk_json_write_named_int64, int, (struct spdk_json_write_ctx *w, const char *name,
+ int64_t val), 0);
+DEFINE_STUB(spdk_json_write_named_string, int, (struct spdk_json_write_ctx *w, const char *name,
+ const char *val), 0);
+DEFINE_STUB(spdk_json_write_named_string_fmt, int, (struct spdk_json_write_ctx *w, const char *name,
+ const char *fmt, ...), 0);
+DEFINE_STUB(spdk_json_write_named_string_fmt_v, int, (struct spdk_json_write_ctx *w,
+ const char *name, const char *fmt, va_list args), 0);
+
+DEFINE_STUB(spdk_json_write_named_array_begin, int, (struct spdk_json_write_ctx *w,
+ const char *name), 0);
+DEFINE_STUB(spdk_json_write_named_object_begin, int, (struct spdk_json_write_ctx *w,
+ const char *name), 0);
diff --git a/src/spdk/test/unit/lib/jsonrpc/Makefile b/src/spdk/test/unit/lib/jsonrpc/Makefile
new file mode 100644
index 00000000..0fc0a2e9
--- /dev/null
+++ b/src/spdk/test/unit/lib/jsonrpc/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = jsonrpc_server.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/jsonrpc/jsonrpc_server.c/.gitignore b/src/spdk/test/unit/lib/jsonrpc/jsonrpc_server.c/.gitignore
new file mode 100644
index 00000000..8852a96d
--- /dev/null
+++ b/src/spdk/test/unit/lib/jsonrpc/jsonrpc_server.c/.gitignore
@@ -0,0 +1 @@
+jsonrpc_server_ut
diff --git a/src/spdk/test/unit/lib/jsonrpc/jsonrpc_server.c/Makefile b/src/spdk/test/unit/lib/jsonrpc/jsonrpc_server.c/Makefile
new file mode 100644
index 00000000..6c02115f
--- /dev/null
+++ b/src/spdk/test/unit/lib/jsonrpc/jsonrpc_server.c/Makefile
@@ -0,0 +1,39 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = jsonrpc_server_ut.c
+SPDK_LIB_LIST = json
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/jsonrpc/jsonrpc_server.c/jsonrpc_server_ut.c b/src/spdk/test/unit/lib/jsonrpc/jsonrpc_server.c/jsonrpc_server_ut.c
new file mode 100644
index 00000000..3c62e41f
--- /dev/null
+++ b/src/spdk/test/unit/lib/jsonrpc/jsonrpc_server.c/jsonrpc_server_ut.c
@@ -0,0 +1,423 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "jsonrpc/jsonrpc_server.c"
+
+#define MAX_PARAMS 100
+#define MAX_REQS 100
+
+struct req {
+ int error;
+ bool got_method;
+ bool got_id;
+ bool got_params;
+ struct spdk_jsonrpc_request *request;
+ struct spdk_json_val method;
+ struct spdk_json_val id;
+ struct spdk_json_val params[MAX_PARAMS];
+};
+
+static uint8_t g_buf[1000];
+static struct req g_reqs[MAX_REQS];
+static struct req *g_cur_req;
+static struct spdk_json_val *g_params;
+static size_t g_num_reqs;
+
+#define PARSE_PASS(in, trailing) \
+ memcpy(g_buf, in, sizeof(in) - 1); \
+ g_num_reqs = 0; \
+ g_cur_req = NULL; \
+ CU_ASSERT(spdk_jsonrpc_parse_request(conn, g_buf, sizeof(in) - 1) == sizeof(in) - sizeof(trailing)); \
+ if (g_cur_req && g_cur_req->request) { \
+ free(g_cur_req->request->send_buf); \
+ g_cur_req->request->send_buf = NULL; \
+ }
+
+#define PARSE_FAIL(in) \
+ memcpy(g_buf, in, sizeof(in) - 1); \
+ g_num_reqs = 0; \
+ g_cur_req = 0; \
+ CU_ASSERT(spdk_jsonrpc_parse_request(conn, g_buf, sizeof(in) - 1) < 0); \
+ if (g_cur_req && g_cur_req->request) { \
+ free(g_cur_req->request->send_buf); \
+ g_cur_req->request->send_buf = NULL; \
+ }
+
+#define REQ_BEGIN(expected_error) \
+ if (g_cur_req == NULL) { \
+ g_cur_req = g_reqs; \
+ } else { \
+ g_cur_req++; \
+ } \
+ CU_ASSERT(g_cur_req - g_reqs <= (ptrdiff_t)g_num_reqs); \
+ CU_ASSERT(g_cur_req->error == expected_error)
+
+#define REQ_BEGIN_VALID() REQ_BEGIN(0)
+#define REQ_BEGIN_INVALID(expected_error) REQ_BEGIN(expected_error)
+
+#define REQ_METHOD(name) \
+ CU_ASSERT(g_cur_req->got_method); \
+ CU_ASSERT(spdk_json_strequal(&g_cur_req->method, name) == true)
+
+#define REQ_METHOD_MISSING() \
+ CU_ASSERT(g_cur_req->got_method == false)
+
+#define REQ_ID_NUM(num) \
+ CU_ASSERT(g_cur_req->got_id); \
+ CU_ASSERT(g_cur_req->id.type == SPDK_JSON_VAL_NUMBER); \
+ CU_ASSERT(memcmp(g_cur_req->id.start, num, sizeof(num) - 1) == 0)
+
+#define REQ_ID_STRING(str) \
+ CU_ASSERT(g_cur_req->got_id); \
+ CU_ASSERT(g_cur_req->id.type == SPDK_JSON_VAL_STRING); \
+ CU_ASSERT(memcmp(g_cur_req->id.start, str, sizeof(str) - 1) == 0)
+
+#define REQ_ID_NULL() \
+ CU_ASSERT(g_cur_req->got_id); \
+ CU_ASSERT(g_cur_req->id.type == SPDK_JSON_VAL_NULL)
+
+#define REQ_ID_MISSING() \
+ CU_ASSERT(g_cur_req->got_id == false)
+
+#define REQ_PARAMS_MISSING() \
+ CU_ASSERT(g_cur_req->got_params == false)
+
+#define REQ_PARAMS_BEGIN() \
+ CU_ASSERT(g_cur_req->got_params); \
+ g_params = g_cur_req->params
+
+#define PARAM_ARRAY_BEGIN() \
+ CU_ASSERT(g_params->type == SPDK_JSON_VAL_ARRAY_BEGIN); \
+ g_params++
+
+#define PARAM_ARRAY_END() \
+ CU_ASSERT(g_params->type == SPDK_JSON_VAL_ARRAY_END); \
+ g_params++
+
+#define PARAM_OBJECT_BEGIN() \
+ CU_ASSERT(g_params->type == SPDK_JSON_VAL_OBJECT_BEGIN); \
+ g_params++
+
+#define PARAM_OBJECT_END() \
+ CU_ASSERT(g_params->type == SPDK_JSON_VAL_OBJECT_END); \
+ g_params++
+
+#define PARAM_NUM(num) \
+ CU_ASSERT(g_params->type == SPDK_JSON_VAL_NUMBER); \
+ CU_ASSERT(g_params->len == sizeof(num) - 1); \
+ CU_ASSERT(memcmp(g_params->start, num, g_params->len) == 0); \
+ g_params++
+
+#define PARAM_NAME(str) \
+ CU_ASSERT(g_params->type == SPDK_JSON_VAL_NAME); \
+ CU_ASSERT(g_params->len == sizeof(str) - 1); \
+ CU_ASSERT(memcmp(g_params->start, str, g_params->len) == 0); \
+ g_params++
+
+#define PARAM_STRING(str) \
+ CU_ASSERT(g_params->type == SPDK_JSON_VAL_STRING); \
+ CU_ASSERT(g_params->len == sizeof(str) - 1); \
+ CU_ASSERT(memcmp(g_params->start, str, g_params->len) == 0); \
+ g_params++
+
+#define FREE_REQUEST() \
+ if (g_reqs->request) { \
+ free(g_reqs->request->send_buf); \
+ } \
+ free(g_reqs->request); \
+ g_reqs->request = NULL
+
+static void
+ut_handle(struct spdk_jsonrpc_request *request, int error, const struct spdk_json_val *method,
+ const struct spdk_json_val *params)
+{
+ const struct spdk_json_val *id = &request->id;
+ struct req *r;
+
+ SPDK_CU_ASSERT_FATAL(g_num_reqs != MAX_REQS);
+ r = &g_reqs[g_num_reqs++];
+
+ r->request = request;
+ r->error = error;
+
+ if (method) {
+ r->got_method = true;
+ r->method = *method;
+ } else {
+ r->got_method = false;
+ }
+
+ if (params) {
+ r->got_params = true;
+ SPDK_CU_ASSERT_FATAL(spdk_json_val_len(params) < MAX_PARAMS);
+ memcpy(r->params, params, spdk_json_val_len(params) * sizeof(struct spdk_json_val));
+ } else {
+ r->got_params = false;
+ }
+
+ if (id && id->type != SPDK_JSON_VAL_INVALID) {
+ r->got_id = true;
+ r->id = *id;
+ } else {
+ r->got_id = false;
+ }
+}
+
+void
+spdk_jsonrpc_server_handle_error(struct spdk_jsonrpc_request *request, int error)
+{
+ /*
+ * Map missing id to Null - this mirrors the behavior in the real
+ * spdk_jsonrpc_server_handle_error() function.
+ */
+ if (request->id.type == SPDK_JSON_VAL_INVALID) {
+ request->id.type = SPDK_JSON_VAL_NULL;
+ }
+
+ ut_handle(request, error, NULL, NULL);
+}
+
+void
+spdk_jsonrpc_server_handle_request(struct spdk_jsonrpc_request *request,
+ const struct spdk_json_val *method, const struct spdk_json_val *params)
+{
+ ut_handle(request, 0, method, params);
+}
+
+void
+spdk_jsonrpc_server_send_response(struct spdk_jsonrpc_request *request)
+{
+ /* TODO */
+}
+
+static void
+test_parse_request(void)
+{
+ struct spdk_jsonrpc_server *server;
+ struct spdk_jsonrpc_server_conn *conn;
+
+ server = calloc(1, sizeof(*server));
+ SPDK_CU_ASSERT_FATAL(server != NULL);
+
+ conn = calloc(1, sizeof(*conn));
+ SPDK_CU_ASSERT_FATAL(conn != NULL);
+
+ conn->server = server;
+
+ /* rpc call with positional parameters */
+ PARSE_PASS("{\"jsonrpc\":\"2.0\",\"method\":\"subtract\",\"params\":[42,23],\"id\":1}", "");
+ REQ_BEGIN_VALID();
+ REQ_METHOD("subtract");
+ REQ_ID_NUM("1");
+ REQ_PARAMS_BEGIN();
+ PARAM_ARRAY_BEGIN();
+ PARAM_NUM("42");
+ PARAM_NUM("23");
+ PARAM_ARRAY_END();
+ FREE_REQUEST();
+
+ /* rpc call with named parameters */
+ PARSE_PASS("{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": {\"subtrahend\": 23, \"minuend\": 42}, \"id\": 3}",
+ "");
+ REQ_BEGIN_VALID();
+ REQ_METHOD("subtract");
+ REQ_ID_NUM("3");
+ REQ_PARAMS_BEGIN();
+ PARAM_OBJECT_BEGIN();
+ PARAM_NAME("subtrahend");
+ PARAM_NUM("23");
+ PARAM_NAME("minuend");
+ PARAM_NUM("42");
+ PARAM_OBJECT_END();
+ FREE_REQUEST();
+
+ /* notification */
+ PARSE_PASS("{\"jsonrpc\": \"2.0\", \"method\": \"update\", \"params\": [1,2,3,4,5]}", "");
+ REQ_BEGIN_VALID();
+ REQ_METHOD("update");
+ REQ_ID_MISSING();
+ REQ_PARAMS_BEGIN();
+ PARAM_ARRAY_BEGIN();
+ PARAM_NUM("1");
+ PARAM_NUM("2");
+ PARAM_NUM("3");
+ PARAM_NUM("4");
+ PARAM_NUM("5");
+ PARAM_ARRAY_END();
+ FREE_REQUEST();
+
+ /* invalid JSON */
+ PARSE_FAIL("{\"jsonrpc\": \"2.0\", \"method\": \"foobar, \"params\": \"bar\", \"baz]");
+ REQ_BEGIN_INVALID(SPDK_JSONRPC_ERROR_PARSE_ERROR);
+ REQ_METHOD_MISSING();
+ REQ_ID_NULL();
+ REQ_PARAMS_MISSING();
+ FREE_REQUEST();
+
+ /* invalid request (method must be a string; params must be array or object) */
+ PARSE_PASS("{\"jsonrpc\": \"2.0\", \"method\": 1, \"params\": \"bar\"}", "");
+ REQ_BEGIN_INVALID(SPDK_JSONRPC_ERROR_INVALID_REQUEST);
+ REQ_METHOD_MISSING();
+ REQ_ID_NULL();
+ REQ_PARAMS_MISSING();
+ FREE_REQUEST();
+
+ /* batch, invalid JSON */
+ PARSE_FAIL(
+ "["
+ "{\"jsonrpc\": \"2.0\", \"method\": \"sum\", \"params\": [1,2,4], \"id\": \"1\"},"
+ "{\"jsonrpc\": \"2.0\", \"method\""
+ "]");
+ REQ_BEGIN_INVALID(SPDK_JSONRPC_ERROR_PARSE_ERROR);
+ REQ_METHOD_MISSING();
+ REQ_ID_NULL();
+ REQ_PARAMS_MISSING();
+ FREE_REQUEST();
+
+ /* empty array */
+ PARSE_PASS("[]", "");
+ REQ_BEGIN_INVALID(SPDK_JSONRPC_ERROR_INVALID_REQUEST);
+ REQ_METHOD_MISSING();
+ REQ_ID_NULL();
+ REQ_PARAMS_MISSING();
+ FREE_REQUEST();
+
+ /* batch - not supported */
+ PARSE_PASS(
+ "["
+ "{\"jsonrpc\": \"2.0\", \"method\": \"sum\", \"params\": [1,2,4], \"id\": \"1\"},"
+ "{\"jsonrpc\": \"2.0\", \"method\": \"notify_hello\", \"params\": [7]},"
+ "{\"jsonrpc\": \"2.0\", \"method\": \"subtract\", \"params\": [42,23], \"id\": \"2\"},"
+ "{\"foo\": \"boo\"},"
+ "{\"jsonrpc\": \"2.0\", \"method\": \"foo.get\", \"params\": {\"name\": \"myself\"}, \"id\": \"5\"},"
+ "{\"jsonrpc\": \"2.0\", \"method\": \"get_data\", \"id\": \"9\"}"
+ "]", "");
+
+ REQ_BEGIN_INVALID(SPDK_JSONRPC_ERROR_INVALID_REQUEST);
+ REQ_METHOD_MISSING();
+ REQ_ID_NULL();
+ REQ_PARAMS_MISSING();
+ FREE_REQUEST();
+
+ free(conn);
+ free(server);
+}
+
+static void
+test_parse_request_streaming(void)
+{
+ struct spdk_jsonrpc_server *server;
+ struct spdk_jsonrpc_server_conn *conn;
+ size_t len, i;
+
+ server = calloc(1, sizeof(*server));
+ SPDK_CU_ASSERT_FATAL(server != NULL);
+
+ conn = calloc(1, sizeof(*conn));
+ SPDK_CU_ASSERT_FATAL(conn != NULL);
+
+ conn->server = server;
+
+ /*
+ * Two valid requests end to end in the same buffer.
+ * Parse should return the first one and point to the beginning of the second one.
+ */
+ PARSE_PASS(
+ "{\"jsonrpc\":\"2.0\",\"method\":\"a\",\"params\":[1],\"id\":1}"
+ "{\"jsonrpc\":\"2.0\",\"method\":\"b\",\"params\":[2],\"id\":2}",
+ "{\"jsonrpc\":\"2.0\",\"method\":\"b\",\"params\":[2],\"id\":2}");
+ REQ_BEGIN_VALID();
+ REQ_METHOD("a");
+ REQ_ID_NUM("1");
+ REQ_PARAMS_BEGIN();
+ PARAM_ARRAY_BEGIN();
+ PARAM_NUM("1");
+ PARAM_ARRAY_END();
+ FREE_REQUEST();
+
+ /* Partial (but not invalid) requests - parse should not consume anything. */
+ snprintf(g_buf, sizeof(g_buf), "%s",
+ "{\"jsonrpc\":\"2.0\",\"method\":\"b\",\"params\":[2],\"id\":2}");
+ len = strlen(g_buf);
+
+ /* Try every partial length up to the full request length */
+ for (i = 0; i < len; i++) {
+ int rc = spdk_jsonrpc_parse_request(conn, g_buf, i);
+ /* Partial request - no data consumed */
+ CU_ASSERT(rc == 0);
+ FREE_REQUEST();
+ }
+
+ /* Verify that full request can be parsed successfully */
+ CU_ASSERT(spdk_jsonrpc_parse_request(conn, g_buf, len) == (ssize_t)len);
+ FREE_REQUEST();
+
+ free(conn);
+ free(server);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("jsonrpc", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "parse_request", test_parse_request) == NULL ||
+ CU_add_test(suite, "parse_request_streaming", test_parse_request_streaming) == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+
+ CU_basic_run_tests();
+
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/log/Makefile b/src/spdk/test/unit/lib/log/Makefile
new file mode 100644
index 00000000..79411a45
--- /dev/null
+++ b/src/spdk/test/unit/lib/log/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = log.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/log/log.c/.gitignore b/src/spdk/test/unit/lib/log/log.c/.gitignore
new file mode 100644
index 00000000..60261c07
--- /dev/null
+++ b/src/spdk/test/unit/lib/log/log.c/.gitignore
@@ -0,0 +1 @@
+log_ut
diff --git a/src/spdk/test/unit/lib/log/log.c/Makefile b/src/spdk/test/unit/lib/log/log.c/Makefile
new file mode 100644
index 00000000..deedd9fb
--- /dev/null
+++ b/src/spdk/test/unit/lib/log/log.c/Makefile
@@ -0,0 +1,40 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+
+TEST_FILE = log_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/log/log.c/log_ut.c b/src/spdk/test/unit/lib/log/log.c/log_ut.c
new file mode 100644
index 00000000..17650f71
--- /dev/null
+++ b/src/spdk/test/unit/lib/log/log.c/log_ut.c
@@ -0,0 +1,113 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+#include "spdk/log.h"
+
+#include "log/log.c"
+#include "log/log_flags.c"
+
+static void
+log_test(void)
+{
+ spdk_log_set_level(SPDK_LOG_ERROR);
+ CU_ASSERT_EQUAL(spdk_log_get_level(), SPDK_LOG_ERROR);
+ spdk_log_set_level(SPDK_LOG_WARN);
+ CU_ASSERT_EQUAL(spdk_log_get_level(), SPDK_LOG_WARN);
+ spdk_log_set_level(SPDK_LOG_NOTICE);
+ CU_ASSERT_EQUAL(spdk_log_get_level(), SPDK_LOG_NOTICE);
+ spdk_log_set_level(SPDK_LOG_INFO);
+ CU_ASSERT_EQUAL(spdk_log_get_level(), SPDK_LOG_INFO);
+ spdk_log_set_level(SPDK_LOG_DEBUG);
+ CU_ASSERT_EQUAL(spdk_log_get_level(), SPDK_LOG_DEBUG);
+
+ spdk_log_set_print_level(SPDK_LOG_ERROR);
+ CU_ASSERT_EQUAL(spdk_log_get_print_level(), SPDK_LOG_ERROR);
+ spdk_log_set_print_level(SPDK_LOG_WARN);
+ CU_ASSERT_EQUAL(spdk_log_get_print_level(), SPDK_LOG_WARN);
+ spdk_log_set_print_level(SPDK_LOG_NOTICE);
+ CU_ASSERT_EQUAL(spdk_log_get_print_level(), SPDK_LOG_NOTICE);
+ spdk_log_set_print_level(SPDK_LOG_INFO);
+ CU_ASSERT_EQUAL(spdk_log_get_print_level(), SPDK_LOG_INFO);
+ spdk_log_set_print_level(SPDK_LOG_DEBUG);
+ CU_ASSERT_EQUAL(spdk_log_get_print_level(), SPDK_LOG_DEBUG);
+
+#ifdef DEBUG
+ CU_ASSERT(spdk_log_get_trace_flag("log") == false);
+
+ spdk_log_set_trace_flag("log");
+ CU_ASSERT(spdk_log_get_trace_flag("log") == true);
+
+ spdk_log_clear_trace_flag("log");
+ CU_ASSERT(spdk_log_get_trace_flag("log") == false);
+#endif
+
+ spdk_log_open();
+ spdk_log_set_trace_flag("log");
+ SPDK_WARNLOG("log warning unit test\n");
+ SPDK_DEBUGLOG(SPDK_LOG_LOG, "log trace test\n");
+ SPDK_TRACEDUMP(SPDK_LOG_LOG, "log trace dump test:", "trace dump", 10);
+ spdk_trace_dump(stderr, "spdk dump test:", "spdk dump", 9);
+
+ spdk_log_close();
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("log", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "log_ut", log_test) == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/lvol/Makefile b/src/spdk/test/unit/lib/lvol/Makefile
new file mode 100644
index 00000000..c9276de4
--- /dev/null
+++ b/src/spdk/test/unit/lib/lvol/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = lvol.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/lvol/lvol.c/.gitignore b/src/spdk/test/unit/lib/lvol/lvol.c/.gitignore
new file mode 100644
index 00000000..57e92bfe
--- /dev/null
+++ b/src/spdk/test/unit/lib/lvol/lvol.c/.gitignore
@@ -0,0 +1 @@
+lvol_ut
diff --git a/src/spdk/test/unit/lib/lvol/lvol.c/Makefile b/src/spdk/test/unit/lib/lvol/lvol.c/Makefile
new file mode 100644
index 00000000..917f4ef6
--- /dev/null
+++ b/src/spdk/test/unit/lib/lvol/lvol.c/Makefile
@@ -0,0 +1,40 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+
+TEST_FILE = lvol_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/lvol/lvol.c/lvol_ut.c b/src/spdk/test/unit/lib/lvol/lvol.c/lvol_ut.c
new file mode 100644
index 00000000..0aebbe1a
--- /dev/null
+++ b/src/spdk/test/unit/lib/lvol/lvol.c/lvol_ut.c
@@ -0,0 +1,2127 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+#include "spdk/blob.h"
+#include "spdk/thread.h"
+#include "spdk/util.h"
+
+#include "common/lib/test_env.c"
+
+#include "lvol/lvol.c"
+
+#define DEV_BUFFER_SIZE (64 * 1024 * 1024)
+#define DEV_BUFFER_BLOCKLEN (4096)
+#define DEV_BUFFER_BLOCKCNT (DEV_BUFFER_SIZE / DEV_BUFFER_BLOCKLEN)
+#define BS_CLUSTER_SIZE (1024 * 1024)
+#define BS_FREE_CLUSTERS (DEV_BUFFER_SIZE / BS_CLUSTER_SIZE)
+#define BS_PAGE_SIZE (4096)
+
+#define SPDK_BLOB_OPTS_CLUSTER_SZ (1024 * 1024)
+#define SPDK_BLOB_OPTS_NUM_MD_PAGES UINT32_MAX
+#define SPDK_BLOB_OPTS_MAX_MD_OPS 32
+#define SPDK_BLOB_OPTS_MAX_CHANNEL_OPS 512
+
+#define SPDK_BLOB_THIN_PROV (1ULL << 0)
+
+const char *uuid = "828d9766-ae50-11e7-bd8d-001e67edf350";
+
+struct spdk_blob {
+ spdk_blob_id id;
+ uint32_t ref;
+ struct spdk_blob_store *bs;
+ int close_status;
+ int open_status;
+ int load_status;
+ TAILQ_ENTRY(spdk_blob) link;
+ char uuid[SPDK_UUID_STRING_LEN];
+ char name[SPDK_LVS_NAME_MAX];
+ bool thin_provisioned;
+};
+
+int g_lvolerrno;
+int g_lvserrno;
+int g_close_super_status;
+int g_resize_rc;
+int g_inflate_rc;
+bool g_lvs_rename_blob_open_error = false;
+struct spdk_lvol_store *g_lvol_store;
+struct spdk_lvol *g_lvol;
+spdk_blob_id g_blobid = 1;
+struct spdk_io_channel *g_io_channel;
+
+struct spdk_blob_store {
+ struct spdk_bs_opts bs_opts;
+ spdk_blob_id super_blobid;
+ TAILQ_HEAD(, spdk_blob) blobs;
+ int get_super_status;
+};
+
+struct lvol_ut_bs_dev {
+ struct spdk_bs_dev bs_dev;
+ int init_status;
+ int load_status;
+ struct spdk_blob_store *bs;
+};
+
+void spdk_bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
+ spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg)
+{
+ cb_fn(cb_arg, g_inflate_rc);
+}
+
+void spdk_bs_blob_decouple_parent(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
+ spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg)
+{
+ cb_fn(cb_arg, g_inflate_rc);
+}
+
+void
+spdk_bs_iter_next(struct spdk_blob_store *bs, struct spdk_blob *b,
+ spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
+{
+ struct spdk_blob *next;
+ int _errno = 0;
+
+ next = TAILQ_NEXT(b, link);
+ if (next == NULL) {
+ _errno = -ENOENT;
+ } else if (next->load_status != 0) {
+ _errno = next->load_status;
+ }
+
+ cb_fn(cb_arg, next, _errno);
+}
+
+void
+spdk_bs_iter_first(struct spdk_blob_store *bs,
+ spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
+{
+ struct spdk_blob *first;
+ int _errno = 0;
+
+ first = TAILQ_FIRST(&bs->blobs);
+ if (first == NULL) {
+ _errno = -ENOENT;
+ } else if (first->load_status != 0) {
+ _errno = first->load_status;
+ }
+
+ cb_fn(cb_arg, first, _errno);
+}
+
+uint64_t spdk_blob_get_num_clusters(struct spdk_blob *blob)
+{
+ return 0;
+}
+
+void
+spdk_bs_get_super(struct spdk_blob_store *bs,
+ spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
+{
+ if (bs->get_super_status != 0) {
+ cb_fn(cb_arg, 0, bs->get_super_status);
+ } else {
+ cb_fn(cb_arg, bs->super_blobid, 0);
+ }
+}
+
+void
+spdk_bs_set_super(struct spdk_blob_store *bs, spdk_blob_id blobid,
+ spdk_bs_op_complete cb_fn, void *cb_arg)
+{
+ bs->super_blobid = blobid;
+ cb_fn(cb_arg, 0);
+}
+
+void
+spdk_bs_load(struct spdk_bs_dev *dev, struct spdk_bs_opts *opts,
+ spdk_bs_op_with_handle_complete cb_fn, void *cb_arg)
+{
+ struct lvol_ut_bs_dev *ut_dev = SPDK_CONTAINEROF(dev, struct lvol_ut_bs_dev, bs_dev);
+ struct spdk_blob_store *bs = NULL;
+
+ if (ut_dev->load_status == 0) {
+ bs = ut_dev->bs;
+ }
+
+ cb_fn(cb_arg, bs, ut_dev->load_status);
+}
+
+struct spdk_io_channel *spdk_bs_alloc_io_channel(struct spdk_blob_store *bs)
+{
+ if (g_io_channel == NULL) {
+ g_io_channel = calloc(1, sizeof(struct spdk_io_channel));
+ SPDK_CU_ASSERT_FATAL(g_io_channel != NULL);
+ }
+ g_io_channel->ref++;
+ return g_io_channel;
+}
+
+void spdk_bs_free_io_channel(struct spdk_io_channel *channel)
+{
+ g_io_channel->ref--;
+ if (g_io_channel->ref == 0) {
+ free(g_io_channel);
+ g_io_channel = NULL;
+ }
+ return;
+}
+
+int
+spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
+ uint16_t value_len)
+{
+ if (!strcmp(name, "uuid")) {
+ CU_ASSERT(value_len == SPDK_UUID_STRING_LEN);
+ memcpy(blob->uuid, value, SPDK_UUID_STRING_LEN);
+ } else if (!strcmp(name, "name")) {
+ CU_ASSERT(value_len <= SPDK_LVS_NAME_MAX);
+ memcpy(blob->name, value, value_len);
+ }
+
+ return 0;
+}
+
+int
+spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name,
+ const void **value, size_t *value_len)
+{
+ if (!strcmp(name, "uuid") && strnlen(blob->uuid, SPDK_UUID_STRING_LEN) != 0) {
+ CU_ASSERT(strnlen(blob->uuid, SPDK_UUID_STRING_LEN) == (SPDK_UUID_STRING_LEN - 1));
+ *value = blob->uuid;
+ *value_len = SPDK_UUID_STRING_LEN;
+ return 0;
+ } else if (!strcmp(name, "name") && strnlen(blob->name, SPDK_LVS_NAME_MAX) != 0) {
+ *value = blob->name;
+ *value_len = strnlen(blob->name, SPDK_LVS_NAME_MAX) + 1;
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+int
+spdk_blob_get_clones(struct spdk_blob_store *bs, spdk_blob_id blobid, spdk_blob_id *ids,
+ size_t *count)
+{
+ return 0;
+}
+
+uint64_t
+spdk_bs_get_page_size(struct spdk_blob_store *bs)
+{
+ return BS_PAGE_SIZE;
+}
+
+int
+spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
+{
+ bdev->blockcnt = size;
+ return 0;
+}
+
+static void
+init_dev(struct lvol_ut_bs_dev *dev)
+{
+ memset(dev, 0, sizeof(*dev));
+ dev->bs_dev.blockcnt = DEV_BUFFER_BLOCKCNT;
+ dev->bs_dev.blocklen = DEV_BUFFER_BLOCKLEN;
+}
+
+static void
+free_dev(struct lvol_ut_bs_dev *dev)
+{
+ struct spdk_blob_store *bs = dev->bs;
+ struct spdk_blob *blob, *tmp;
+
+ if (bs == NULL) {
+ return;
+ }
+
+ TAILQ_FOREACH_SAFE(blob, &bs->blobs, link, tmp) {
+ TAILQ_REMOVE(&bs->blobs, blob, link);
+ free(blob);
+ }
+
+ free(bs);
+ dev->bs = NULL;
+}
+
+void
+spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
+ spdk_bs_op_with_handle_complete cb_fn, void *cb_arg)
+{
+ struct lvol_ut_bs_dev *ut_dev = SPDK_CONTAINEROF(dev, struct lvol_ut_bs_dev, bs_dev);
+ struct spdk_blob_store *bs;
+
+ bs = calloc(1, sizeof(*bs));
+ SPDK_CU_ASSERT_FATAL(bs != NULL);
+
+ TAILQ_INIT(&bs->blobs);
+
+ ut_dev->bs = bs;
+
+ memcpy(&bs->bs_opts, o, sizeof(struct spdk_bs_opts));
+
+ cb_fn(cb_arg, bs, 0);
+}
+
+void
+spdk_bs_unload(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, void *cb_arg)
+{
+ cb_fn(cb_arg, 0);
+}
+
+void
+spdk_bs_destroy(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn,
+ void *cb_arg)
+{
+ free(bs);
+
+ cb_fn(cb_arg, 0);
+}
+
+void
+spdk_bs_delete_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
+ spdk_blob_op_complete cb_fn, void *cb_arg)
+{
+ struct spdk_blob *blob;
+
+ TAILQ_FOREACH(blob, &bs->blobs, link) {
+ if (blob->id == blobid) {
+ TAILQ_REMOVE(&bs->blobs, blob, link);
+ free(blob);
+ break;
+ }
+ }
+
+ cb_fn(cb_arg, 0);
+}
+
+spdk_blob_id
+spdk_blob_get_id(struct spdk_blob *blob)
+{
+ return blob->id;
+}
+
+void
+spdk_bs_opts_init(struct spdk_bs_opts *opts)
+{
+ opts->cluster_sz = SPDK_BLOB_OPTS_CLUSTER_SZ;
+ opts->num_md_pages = SPDK_BLOB_OPTS_NUM_MD_PAGES;
+ opts->max_md_ops = SPDK_BLOB_OPTS_MAX_MD_OPS;
+ opts->max_channel_ops = SPDK_BLOB_OPTS_MAX_CHANNEL_OPS;
+ memset(&opts->bstype, 0, sizeof(opts->bstype));
+}
+
+uint64_t
+spdk_bs_get_cluster_size(struct spdk_blob_store *bs)
+{
+ return BS_CLUSTER_SIZE;
+}
+
+void spdk_blob_close(struct spdk_blob *b, spdk_blob_op_complete cb_fn, void *cb_arg)
+{
+ b->ref--;
+
+ cb_fn(cb_arg, b->close_status);
+}
+
+void
+spdk_blob_resize(struct spdk_blob *blob, uint64_t sz, spdk_blob_op_complete cb_fn, void *cb_arg)
+{
+ if (g_resize_rc != 0) {
+ return cb_fn(cb_arg, g_resize_rc);
+ } else if (sz > DEV_BUFFER_SIZE / BS_CLUSTER_SIZE) {
+ return cb_fn(cb_arg, -ENOMEM);
+ }
+ cb_fn(cb_arg, 0);
+}
+
+void
+spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
+{
+ cb_fn(cb_arg, 0);
+}
+
+void
+spdk_bs_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
+ spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
+{
+ struct spdk_blob *blob;
+
+ if (!g_lvs_rename_blob_open_error) {
+ TAILQ_FOREACH(blob, &bs->blobs, link) {
+ if (blob->id == blobid) {
+ blob->ref++;
+ cb_fn(cb_arg, blob, blob->open_status);
+ return;
+ }
+ }
+ }
+
+ cb_fn(cb_arg, NULL, -ENOENT);
+}
+
+uint64_t
+spdk_bs_free_cluster_count(struct spdk_blob_store *bs)
+{
+ return BS_FREE_CLUSTERS;
+}
+
+void
+spdk_blob_opts_init(struct spdk_blob_opts *opts)
+{
+ opts->num_clusters = 0;
+ opts->thin_provision = false;
+ opts->xattrs.count = 0;
+ opts->xattrs.names = NULL;
+ opts->xattrs.ctx = NULL;
+ opts->xattrs.get_value = NULL;
+}
+
+void
+spdk_bs_create_blob(struct spdk_blob_store *bs,
+ spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
+{
+ spdk_bs_create_blob_ext(bs, NULL, cb_fn, cb_arg);
+}
+
+void
+spdk_bs_create_blob_ext(struct spdk_blob_store *bs, const struct spdk_blob_opts *opts,
+ spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
+{
+ struct spdk_blob *b;
+
+ if (opts && opts->num_clusters > DEV_BUFFER_SIZE / BS_CLUSTER_SIZE) {
+ cb_fn(cb_arg, 0, -1);
+ return;
+ }
+
+ b = calloc(1, sizeof(*b));
+ SPDK_CU_ASSERT_FATAL(b != NULL);
+
+ b->id = g_blobid++;
+ if (opts != NULL && opts->thin_provision) {
+ b->thin_provisioned = true;
+ }
+ b->bs = bs;
+
+ TAILQ_INSERT_TAIL(&bs->blobs, b, link);
+ cb_fn(cb_arg, b->id, 0);
+}
+
+void
+spdk_bs_create_snapshot(struct spdk_blob_store *bs, spdk_blob_id blobid,
+ const struct spdk_blob_xattr_opts *snapshot_xattrs,
+ spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
+{
+ spdk_bs_create_blob_ext(bs, NULL, cb_fn, cb_arg);
+}
+
+void
+spdk_bs_create_clone(struct spdk_blob_store *bs, spdk_blob_id blobid,
+ const struct spdk_blob_xattr_opts *clone_xattrs,
+ spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
+{
+ spdk_bs_create_blob_ext(bs, NULL, cb_fn, cb_arg);
+}
+
+static void
+_lvol_send_msg(spdk_thread_fn fn, void *ctx, void *thread_ctx)
+{
+ fn(ctx);
+}
+
+static void
+lvol_store_op_with_handle_complete(void *cb_arg, struct spdk_lvol_store *lvol_store, int lvserrno)
+{
+ g_lvol_store = lvol_store;
+ g_lvserrno = lvserrno;
+}
+
+static void
+lvol_op_complete(void *cb_arg, int lvolerrno)
+{
+ g_lvolerrno = lvolerrno;
+}
+
+static void
+lvol_op_with_handle_complete(void *cb_arg, struct spdk_lvol *lvol, int lvserrno)
+{
+ g_lvol = lvol;
+ g_lvserrno = lvserrno;
+}
+
+static void
+lvol_store_op_complete(void *cb_arg, int lvserrno)
+{
+ g_lvserrno = lvserrno;
+}
+
+static void
+close_cb(void *cb_arg, int lvolerrno)
+{
+ g_lvserrno = lvolerrno;
+}
+
+static void
+destroy_cb(void *cb_arg, int lvolerrno)
+{
+ g_lvserrno = lvolerrno;
+}
+
+static void
+lvs_init_unload_success(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_allocate_thread(_lvol_send_msg, NULL, NULL, NULL, NULL);
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_lvserrno = -1;
+
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_stores));
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(!TAILQ_EMPTY(&g_lvol_stores));
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, false, lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ /* Lvol store has an open lvol, this unload should fail. */
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(g_lvol_store, lvol_store_op_complete, NULL);
+ CU_ASSERT(rc == -EBUSY);
+ CU_ASSERT(g_lvserrno == -EBUSY);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(!TAILQ_EMPTY(&g_lvol_stores));
+
+ /* Lvol has to be closed (or destroyed) before unloading lvol store. */
+ spdk_lvol_close(g_lvol, close_cb, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(g_lvol_store, lvol_store_op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_stores));
+
+ free_dev(&dev);
+
+ spdk_free_thread();
+}
+
+static void
+lvs_init_destroy_success(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_allocate_thread(_lvol_send_msg, NULL, NULL, NULL, NULL);
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_lvserrno = -1;
+
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, false, lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ /* Lvol store contains one lvol, this destroy should fail. */
+ g_lvserrno = -1;
+ rc = spdk_lvs_destroy(g_lvol_store, lvol_store_op_complete, NULL);
+ CU_ASSERT(rc == -EBUSY);
+ CU_ASSERT(g_lvserrno == -EBUSY);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ spdk_lvol_close(g_lvol, close_cb, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ spdk_lvol_destroy(g_lvol, destroy_cb, NULL);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_destroy(g_lvol_store, lvol_store_op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ spdk_free_thread();
+}
+
+static void
+lvs_init_opts_success(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_allocate_thread(_lvol_send_msg, NULL, NULL, NULL, NULL);
+
+ g_lvserrno = -1;
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+ opts.cluster_sz = 8192;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(dev.bs->bs_opts.cluster_sz == opts.cluster_sz);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(g_lvol_store, lvol_store_op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ free_dev(&dev);
+
+ spdk_free_thread();
+}
+
+static void
+lvs_unload_lvs_is_null_fail(void)
+{
+ int rc = 0;
+
+ spdk_allocate_thread(_lvol_send_msg, NULL, NULL, NULL, NULL);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(NULL, lvol_store_op_complete, NULL);
+ CU_ASSERT(rc == -ENODEV);
+ CU_ASSERT(g_lvserrno == -1);
+
+ spdk_free_thread();
+}
+
+static void
+lvs_names(void)
+{
+ struct lvol_ut_bs_dev dev_x, dev_y, dev_x2;
+ struct spdk_lvs_opts opts_none, opts_x, opts_y, opts_full;
+ struct spdk_lvol_store *lvs_x, *lvs_y, *lvs_x2;
+ int rc = 0;
+
+ init_dev(&dev_x);
+ init_dev(&dev_y);
+ init_dev(&dev_x2);
+
+ spdk_allocate_thread(_lvol_send_msg, NULL, NULL, NULL, NULL);
+
+ spdk_lvs_opts_init(&opts_none);
+ spdk_lvs_opts_init(&opts_x);
+ opts_x.name[0] = 'x';
+ spdk_lvs_opts_init(&opts_y);
+ opts_y.name[0] = 'y';
+ spdk_lvs_opts_init(&opts_full);
+ memset(opts_full.name, 'a', sizeof(opts_full.name));
+
+ /* Test that opts with no name fails spdk_lvs_init(). */
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_stores));
+ rc = spdk_lvs_init(&dev_x.bs_dev, &opts_none, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc != 0);
+ CU_ASSERT(g_lvol_store == NULL);
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_stores));
+
+ /* Test that opts with no null terminator for name fails spdk_lvs_init(). */
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_stores));
+ rc = spdk_lvs_init(&dev_x.bs_dev, &opts_full, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc != 0);
+ CU_ASSERT(g_lvol_store == NULL);
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_stores));
+
+ /* Test that we can create an lvolstore with name 'x'. */
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_stores));
+ g_lvol_store = NULL;
+ rc = spdk_lvs_init(&dev_x.bs_dev, &opts_x, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(!TAILQ_EMPTY(&g_lvol_stores));
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ lvs_x = g_lvol_store;
+
+ /* Test that we can create an lvolstore with name 'y'. */
+ g_lvol_store = NULL;
+ rc = spdk_lvs_init(&dev_y.bs_dev, &opts_y, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ lvs_y = g_lvol_store;
+
+ /* Test that we cannot create another lvolstore with name 'x'. */
+ rc = spdk_lvs_init(&dev_x2.bs_dev, &opts_x, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == -EEXIST);
+
+ /* Now destroy lvolstore 'x' and then confirm we can create a new lvolstore with name 'x'. */
+ g_lvserrno = -1;
+ rc = spdk_lvs_destroy(lvs_x, lvol_store_op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+ rc = spdk_lvs_init(&dev_x.bs_dev, &opts_x, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ lvs_x = g_lvol_store;
+
+ /*
+ * Unload lvolstore 'x'. Then we should be able to create another lvolstore with name 'x'.
+ */
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(lvs_x, lvol_store_op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+ rc = spdk_lvs_init(&dev_x2.bs_dev, &opts_x, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ lvs_x2 = g_lvol_store;
+
+ /* Confirm that we cannot load the first lvolstore 'x'. */
+ g_lvserrno = 0;
+ spdk_lvs_load(&dev_x.bs_dev, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno != 0);
+
+ /* Destroy the second lvolstore 'x'. Then we should be able to load the first lvolstore 'x'. */
+ g_lvserrno = -1;
+ rc = spdk_lvs_destroy(lvs_x2, lvol_store_op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvserrno = -1;
+ spdk_lvs_load(&dev_x.bs_dev, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ lvs_x = g_lvol_store;
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_destroy(lvs_x, lvol_store_op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_destroy(lvs_y, lvol_store_op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+
+ spdk_free_thread();
+}
+
+static void
+lvol_create_destroy_success(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_allocate_thread(_lvol_send_msg, NULL, NULL, NULL, NULL);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, false, lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ spdk_lvol_close(g_lvol, close_cb, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ spdk_lvol_destroy(g_lvol, destroy_cb, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(g_lvol_store, lvol_store_op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ free_dev(&dev);
+
+ spdk_free_thread();
+}
+
+static void
+lvol_create_fail(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_allocate_thread(_lvol_send_msg, NULL, NULL, NULL, NULL);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_lvol_store = NULL;
+ g_lvserrno = 0;
+ rc = spdk_lvs_init(NULL, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc != 0);
+ CU_ASSERT(g_lvol_store == NULL);
+
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ g_lvol = NULL;
+ rc = spdk_lvol_create(NULL, "lvol", 10, false, lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(rc != 0);
+ CU_ASSERT(g_lvol == NULL);
+
+ g_lvol = NULL;
+ rc = spdk_lvol_create(g_lvol_store, "lvol", DEV_BUFFER_SIZE + 1, false,
+ lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno != 0);
+ CU_ASSERT(g_lvol == NULL);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(g_lvol_store, lvol_store_op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ free_dev(&dev);
+
+ spdk_free_thread();
+}
+
+static void
+lvol_destroy_fail(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_allocate_thread(_lvol_send_msg, NULL, NULL, NULL, NULL);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, false, lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ spdk_lvol_close(g_lvol, close_cb, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ spdk_lvol_destroy(g_lvol, destroy_cb, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(g_lvol_store, lvol_store_op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ free_dev(&dev);
+
+ spdk_free_thread();
+}
+
+static void
+lvol_close_fail(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_allocate_thread(_lvol_send_msg, NULL, NULL, NULL, NULL);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, false, lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ spdk_lvol_close(g_lvol, close_cb, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(g_lvol_store, lvol_store_op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ free_dev(&dev);
+
+ spdk_free_thread();
+}
+
+static void
+lvol_close_success(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_allocate_thread(_lvol_send_msg, NULL, NULL, NULL, NULL);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, false, lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ spdk_lvol_close(g_lvol, close_cb, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(g_lvol_store, lvol_store_op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ free_dev(&dev);
+
+ spdk_free_thread();
+}
+
+static void
+lvol_resize(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_allocate_thread(_lvol_send_msg, NULL, NULL, NULL, NULL);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_resize_rc = 0;
+ g_lvserrno = -1;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, false, lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ /* Resize to same size */
+ spdk_lvol_resize(g_lvol, 10, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ /* Resize to smaller size */
+ spdk_lvol_resize(g_lvol, 5, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ /* Resize to bigger size */
+ spdk_lvol_resize(g_lvol, 15, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ /* Resize to size = 0 */
+ spdk_lvol_resize(g_lvol, 0, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ /* Resize to bigger size than available */
+ g_lvserrno = 0;
+ spdk_lvol_resize(g_lvol, 0xFFFFFFFF, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno != 0);
+
+ /* Fail resize */
+ g_resize_rc = -1;
+ g_lvserrno = 0;
+ spdk_lvol_resize(g_lvol, 10, lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno != 0);
+ g_resize_rc = 0;
+
+ g_resize_rc = 0;
+ spdk_lvol_close(g_lvol, close_cb, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ spdk_lvol_destroy(g_lvol, destroy_cb, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(g_lvol_store, lvol_store_op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ free_dev(&dev);
+
+ spdk_free_thread();
+}
+
+static void
+null_cb(void *ctx, struct spdk_blob_store *bs, int bserrno)
+{
+ SPDK_CU_ASSERT_FATAL(bs != NULL);
+}
+
+static void
+lvs_load(void)
+{
+ int rc = -1;
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_with_handle_req *req;
+ struct spdk_bs_opts bs_opts = {};
+ struct spdk_blob *super_blob;
+
+ req = calloc(1, sizeof(*req));
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+
+ init_dev(&dev);
+ spdk_bs_opts_init(&bs_opts);
+ snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "LVOLSTORE");
+ spdk_bs_init(&dev.bs_dev, &bs_opts, null_cb, NULL);
+
+ spdk_allocate_thread(_lvol_send_msg, NULL, NULL, NULL, NULL);
+
+ /* Fail on bs load */
+ dev.load_status = -1;
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_stores));
+ spdk_lvs_load(&dev.bs_dev, lvol_store_op_with_handle_complete, req);
+ CU_ASSERT(g_lvserrno != 0);
+ CU_ASSERT(g_lvol_store == NULL);
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_stores));
+
+ /* Fail on getting super blob */
+ dev.load_status = 0;
+ dev.bs->get_super_status = -1;
+ spdk_lvs_load(&dev.bs_dev, lvol_store_op_with_handle_complete, req);
+ CU_ASSERT(g_lvserrno == -ENODEV);
+ CU_ASSERT(g_lvol_store == NULL);
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_stores));
+
+ /* Fail on opening super blob */
+ g_lvserrno = 0;
+ super_blob = calloc(1, sizeof(*super_blob));
+ super_blob->id = 0x100;
+ super_blob->open_status = -1;
+ TAILQ_INSERT_TAIL(&dev.bs->blobs, super_blob, link);
+ dev.bs->super_blobid = 0x100;
+ dev.bs->get_super_status = 0;
+ spdk_lvs_load(&dev.bs_dev, lvol_store_op_with_handle_complete, req);
+ CU_ASSERT(g_lvserrno == -ENODEV);
+ CU_ASSERT(g_lvol_store == NULL);
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_stores));
+
+ /* Fail on getting uuid */
+ g_lvserrno = 0;
+ super_blob->open_status = 0;
+ spdk_lvs_load(&dev.bs_dev, lvol_store_op_with_handle_complete, req);
+ CU_ASSERT(g_lvserrno == -EINVAL);
+ CU_ASSERT(g_lvol_store == NULL);
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_stores));
+
+ /* Fail on getting name */
+ g_lvserrno = 0;
+ spdk_blob_set_xattr(super_blob, "uuid", uuid, SPDK_UUID_STRING_LEN);
+ spdk_lvs_load(&dev.bs_dev, lvol_store_op_with_handle_complete, req);
+ CU_ASSERT(g_lvserrno == -EINVAL);
+ CU_ASSERT(g_lvol_store == NULL);
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_stores));
+
+ /* Fail on closing super blob */
+ g_lvserrno = 0;
+ spdk_blob_set_xattr(super_blob, "name", "lvs", strnlen("lvs", SPDK_LVS_NAME_MAX) + 1);
+ super_blob->close_status = -1;
+ spdk_lvs_load(&dev.bs_dev, lvol_store_op_with_handle_complete, req);
+ CU_ASSERT(g_lvserrno == -ENODEV);
+ CU_ASSERT(g_lvol_store == NULL);
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_stores));
+
+ /* Load successfully */
+ g_lvserrno = 0;
+ super_blob->close_status = 0;
+ spdk_lvs_load(&dev.bs_dev, lvol_store_op_with_handle_complete, req);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store != NULL);
+ CU_ASSERT(!TAILQ_EMPTY(&g_lvol_stores));
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(g_lvol_store, lvol_store_op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(TAILQ_EMPTY(&g_lvol_stores));
+
+ free(req);
+ free_dev(&dev);
+
+ spdk_free_thread();
+}
+
+static void
+lvols_load(void)
+{
+ int rc = -1;
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_with_handle_req *req;
+ struct spdk_bs_opts bs_opts;
+ struct spdk_blob *super_blob, *blob1, *blob2, *blob3;
+
+ req = calloc(1, sizeof(*req));
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+
+ init_dev(&dev);
+ spdk_bs_opts_init(&bs_opts);
+ snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "LVOLSTORE");
+ spdk_bs_init(&dev.bs_dev, &bs_opts, null_cb, NULL);
+ super_blob = calloc(1, sizeof(*super_blob));
+ SPDK_CU_ASSERT_FATAL(super_blob != NULL);
+ super_blob->id = 0x100;
+ spdk_blob_set_xattr(super_blob, "uuid", uuid, SPDK_UUID_STRING_LEN);
+ spdk_blob_set_xattr(super_blob, "name", "lvs", strnlen("lvs", SPDK_LVS_NAME_MAX) + 1);
+ TAILQ_INSERT_TAIL(&dev.bs->blobs, super_blob, link);
+ dev.bs->super_blobid = 0x100;
+
+ /*
+ * Create 3 blobs, write different char values to the last char in the UUID
+ * to make sure they are unique.
+ */
+ blob1 = calloc(1, sizeof(*blob1));
+ SPDK_CU_ASSERT_FATAL(blob1 != NULL);
+ blob1->id = 0x1;
+ spdk_blob_set_xattr(blob1, "uuid", uuid, SPDK_UUID_STRING_LEN);
+ spdk_blob_set_xattr(blob1, "name", "lvol1", strnlen("lvol1", SPDK_LVOL_NAME_MAX) + 1);
+ blob1->uuid[SPDK_UUID_STRING_LEN - 2] = '1';
+
+ blob2 = calloc(1, sizeof(*blob2));
+ SPDK_CU_ASSERT_FATAL(blob2 != NULL);
+ blob2->id = 0x2;
+ spdk_blob_set_xattr(blob2, "uuid", uuid, SPDK_UUID_STRING_LEN);
+ spdk_blob_set_xattr(blob2, "name", "lvol2", strnlen("lvol2", SPDK_LVOL_NAME_MAX) + 1);
+ blob2->uuid[SPDK_UUID_STRING_LEN - 2] = '2';
+
+ blob3 = calloc(1, sizeof(*blob3));
+ SPDK_CU_ASSERT_FATAL(blob3 != NULL);
+ blob3->id = 0x2;
+ spdk_blob_set_xattr(blob3, "uuid", uuid, SPDK_UUID_STRING_LEN);
+ spdk_blob_set_xattr(blob3, "name", "lvol3", strnlen("lvol3", SPDK_LVOL_NAME_MAX) + 1);
+ blob3->uuid[SPDK_UUID_STRING_LEN - 2] = '3';
+
+ spdk_allocate_thread(_lvol_send_msg, NULL, NULL, NULL, NULL);
+
+ /* Load lvs with 0 blobs */
+ g_lvserrno = 0;
+ spdk_lvs_load(&dev.bs_dev, lvol_store_op_with_handle_complete, req);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT(g_lvol_store != NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(g_lvol_store, lvol_store_op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+
+ TAILQ_INSERT_TAIL(&dev.bs->blobs, blob1, link);
+ TAILQ_INSERT_TAIL(&dev.bs->blobs, blob2, link);
+ TAILQ_INSERT_TAIL(&dev.bs->blobs, blob3, link);
+
+ /* Load lvs again with 3 blobs, but fail on 1st one */
+ g_lvol_store = NULL;
+ g_lvserrno = 0;
+ blob1->load_status = -1;
+ spdk_lvs_load(&dev.bs_dev, lvol_store_op_with_handle_complete, req);
+ CU_ASSERT(g_lvserrno != 0);
+ CU_ASSERT(g_lvol_store == NULL);
+
+ /* Load lvs again with 3 blobs, but fail on 3rd one */
+ g_lvol_store = NULL;
+ g_lvserrno = 0;
+ blob1->load_status = 0;
+ blob2->load_status = 0;
+ blob3->load_status = -1;
+ spdk_lvs_load(&dev.bs_dev, lvol_store_op_with_handle_complete, req);
+ CU_ASSERT(g_lvserrno != 0);
+ CU_ASSERT(g_lvol_store == NULL);
+
+ /* Load lvs again with 3 blobs, with success */
+ g_lvol_store = NULL;
+ g_lvserrno = 0;
+ blob1->load_status = 0;
+ blob2->load_status = 0;
+ blob3->load_status = 0;
+ spdk_lvs_load(&dev.bs_dev, lvol_store_op_with_handle_complete, req);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ CU_ASSERT(!TAILQ_EMPTY(&g_lvol_store->lvols));
+
+ g_lvserrno = -1;
+ /* rc = */ spdk_lvs_unload(g_lvol_store, lvol_store_op_complete, NULL);
+ /*
+ * Disable these two asserts for now. lvolstore should allow unload as long
+ * as the lvols were not opened - but this is coming a future patch.
+ */
+ /* CU_ASSERT(rc == 0); */
+ /* CU_ASSERT(g_lvserrno == 0); */
+
+ free(req);
+ free_dev(&dev);
+
+ spdk_free_thread();
+}
+
+static void
+lvol_open(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_with_handle_req *req;
+ struct spdk_bs_opts bs_opts;
+ struct spdk_blob *super_blob, *blob1, *blob2, *blob3;
+ struct spdk_lvol *lvol, *tmp;
+
+ req = calloc(1, sizeof(*req));
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+
+ init_dev(&dev);
+ spdk_bs_opts_init(&bs_opts);
+ snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "LVOLSTORE");
+ spdk_bs_init(&dev.bs_dev, &bs_opts, null_cb, NULL);
+ super_blob = calloc(1, sizeof(*super_blob));
+ SPDK_CU_ASSERT_FATAL(super_blob != NULL);
+ super_blob->id = 0x100;
+ spdk_blob_set_xattr(super_blob, "uuid", uuid, SPDK_UUID_STRING_LEN);
+ spdk_blob_set_xattr(super_blob, "name", "lvs", strnlen("lvs", SPDK_LVS_NAME_MAX) + 1);
+ TAILQ_INSERT_TAIL(&dev.bs->blobs, super_blob, link);
+ dev.bs->super_blobid = 0x100;
+
+ /*
+ * Create 3 blobs, write different char values to the last char in the UUID
+ * to make sure they are unique.
+ */
+ blob1 = calloc(1, sizeof(*blob1));
+ SPDK_CU_ASSERT_FATAL(blob1 != NULL);
+ blob1->id = 0x1;
+ spdk_blob_set_xattr(blob1, "uuid", uuid, SPDK_UUID_STRING_LEN);
+ spdk_blob_set_xattr(blob1, "name", "lvol1", strnlen("lvol1", SPDK_LVOL_NAME_MAX) + 1);
+ blob1->uuid[SPDK_UUID_STRING_LEN - 2] = '1';
+
+ blob2 = calloc(1, sizeof(*blob2));
+ SPDK_CU_ASSERT_FATAL(blob2 != NULL);
+ blob2->id = 0x2;
+ spdk_blob_set_xattr(blob2, "uuid", uuid, SPDK_UUID_STRING_LEN);
+ spdk_blob_set_xattr(blob2, "name", "lvol2", strnlen("lvol2", SPDK_LVOL_NAME_MAX) + 1);
+ blob2->uuid[SPDK_UUID_STRING_LEN - 2] = '2';
+
+ blob3 = calloc(1, sizeof(*blob3));
+ SPDK_CU_ASSERT_FATAL(blob3 != NULL);
+ blob3->id = 0x2;
+ spdk_blob_set_xattr(blob3, "uuid", uuid, SPDK_UUID_STRING_LEN);
+ spdk_blob_set_xattr(blob3, "name", "lvol3", strnlen("lvol3", SPDK_LVOL_NAME_MAX) + 1);
+ blob3->uuid[SPDK_UUID_STRING_LEN - 2] = '3';
+
+ spdk_allocate_thread(_lvol_send_msg, NULL, NULL, NULL, NULL);
+
+ TAILQ_INSERT_TAIL(&dev.bs->blobs, blob1, link);
+ TAILQ_INSERT_TAIL(&dev.bs->blobs, blob2, link);
+ TAILQ_INSERT_TAIL(&dev.bs->blobs, blob3, link);
+
+ /* Load lvs with 3 blobs */
+ g_lvol_store = NULL;
+ g_lvserrno = 0;
+ spdk_lvs_load(&dev.bs_dev, lvol_store_op_with_handle_complete, req);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&g_lvol_stores));
+
+ blob1->open_status = -1;
+ blob2->open_status = -1;
+ blob3->open_status = -1;
+
+ /* Fail opening all lvols */
+ TAILQ_FOREACH_SAFE(lvol, &g_lvol_store->lvols, link, tmp) {
+ spdk_lvol_open(lvol, lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno != 0);
+ }
+
+ blob1->open_status = 0;
+ blob2->open_status = 0;
+ blob3->open_status = 0;
+
+ /* Open all lvols */
+ TAILQ_FOREACH_SAFE(lvol, &g_lvol_store->lvols, link, tmp) {
+ spdk_lvol_open(lvol, lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ }
+
+ /* Close all lvols */
+ TAILQ_FOREACH_SAFE(lvol, &g_lvol_store->lvols, link, tmp) {
+ spdk_lvol_close(lvol, lvol_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ }
+
+ g_lvserrno = -1;
+ spdk_lvs_destroy(g_lvol_store, lvol_store_op_complete, NULL);
+
+ free(req);
+ free(blob1);
+ free(blob2);
+ free(blob3);
+
+ spdk_free_thread();
+}
+
+static void
+lvol_snapshot(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvol *lvol;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_allocate_thread(_lvol_send_msg, NULL, NULL, NULL, NULL);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, true, lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ lvol = g_lvol;
+
+ spdk_lvol_create_snapshot(lvol, "snap", lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ CU_ASSERT_STRING_EQUAL(g_lvol->name, "snap");
+
+ /* Lvol has to be closed (or destroyed) before unloading lvol store. */
+ spdk_lvol_close(g_lvol, close_cb, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvserrno = -1;
+
+ spdk_lvol_close(lvol, close_cb, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvserrno = -1;
+
+ rc = spdk_lvs_unload(g_lvol_store, lvol_store_op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ free_dev(&dev);
+
+ spdk_free_thread();
+}
+
+static void
+lvol_snapshot_fail(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvol *lvol, *snap;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_allocate_thread(_lvol_send_msg, NULL, NULL, NULL, NULL);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, true, lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ lvol = g_lvol;
+
+ spdk_lvol_create_snapshot(NULL, "snap", lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno < 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol == NULL);
+
+ spdk_lvol_create_snapshot(lvol, "", lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno < 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol == NULL);
+
+ spdk_lvol_create_snapshot(lvol, NULL, lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno < 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol == NULL);
+
+ spdk_lvol_create_snapshot(lvol, "snap", lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ CU_ASSERT_STRING_EQUAL(g_lvol->name, "snap");
+
+ snap = g_lvol;
+
+ spdk_lvol_create_snapshot(lvol, "snap", lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno < 0);
+
+ spdk_lvol_close(lvol, close_cb, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvserrno = -1;
+
+ spdk_lvol_close(snap, close_cb, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvserrno = -1;
+
+ rc = spdk_lvs_unload(g_lvol_store, lvol_store_op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ free_dev(&dev);
+
+ spdk_free_thread();
+}
+
+static void
+lvol_clone(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvol *lvol;
+ struct spdk_lvol *snap;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_allocate_thread(_lvol_send_msg, NULL, NULL, NULL, NULL);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, true, lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ lvol = g_lvol;
+
+ spdk_lvol_create_snapshot(lvol, "snap", lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ CU_ASSERT_STRING_EQUAL(g_lvol->name, "snap");
+
+ snap = g_lvol;
+
+ spdk_lvol_create_clone(snap, "clone", lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ CU_ASSERT_STRING_EQUAL(g_lvol->name, "clone");
+
+ /* Lvol has to be closed (or destroyed) before unloading lvol store. */
+ spdk_lvol_close(g_lvol, close_cb, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvserrno = -1;
+
+ spdk_lvol_close(snap, close_cb, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvserrno = -1;
+
+ spdk_lvol_close(lvol, close_cb, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvserrno = -1;
+
+ rc = spdk_lvs_unload(g_lvol_store, lvol_store_op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ free_dev(&dev);
+
+ spdk_free_thread();
+}
+
+static void
+lvol_clone_fail(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvol *lvol;
+ struct spdk_lvol *snap;
+ struct spdk_lvol *clone;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_allocate_thread(_lvol_send_msg, NULL, NULL, NULL, NULL);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, true, lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ lvol = g_lvol;
+
+ spdk_lvol_create_snapshot(lvol, "snap", lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ CU_ASSERT_STRING_EQUAL(g_lvol->name, "snap");
+
+ snap = g_lvol;
+
+ spdk_lvol_create_clone(NULL, "clone", lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno < 0);
+
+ spdk_lvol_create_clone(snap, "", lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno < 0);
+
+ spdk_lvol_create_clone(snap, NULL, lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno < 0);
+
+ spdk_lvol_create_clone(snap, "clone", lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ CU_ASSERT_STRING_EQUAL(g_lvol->name, "clone");
+
+ clone = g_lvol;
+
+ spdk_lvol_create_clone(snap, "clone", lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno < 0);
+
+ /* Lvol has to be closed (or destroyed) before unloading lvol store. */
+ spdk_lvol_close(clone, close_cb, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvserrno = -1;
+
+ spdk_lvol_close(snap, close_cb, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvserrno = -1;
+
+ spdk_lvol_close(lvol, close_cb, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvserrno = -1;
+
+ rc = spdk_lvs_unload(g_lvol_store, lvol_store_op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ free_dev(&dev);
+
+ spdk_free_thread();
+}
+
+static void
+lvol_names(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ struct spdk_lvol_store *lvs;
+ struct spdk_lvol *lvol, *lvol2;
+ char fullname[SPDK_LVOL_NAME_MAX];
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_allocate_thread(_lvol_send_msg, NULL, NULL, NULL, NULL);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_lvserrno = -1;
+ g_lvol_store = NULL;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ lvs = g_lvol_store;
+
+ rc = spdk_lvol_create(lvs, NULL, 1, false, lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == -EINVAL);
+
+ rc = spdk_lvol_create(lvs, "", 1, false, lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == -EINVAL);
+
+ memset(fullname, 'x', sizeof(fullname));
+ rc = spdk_lvol_create(lvs, fullname, 1, false, lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == -EINVAL);
+
+ g_lvserrno = -1;
+ rc = spdk_lvol_create(lvs, "lvol", 1, false, lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ lvol = g_lvol;
+
+ rc = spdk_lvol_create(lvs, "lvol", 1, false, lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == -EEXIST);
+
+ g_lvserrno = -1;
+ rc = spdk_lvol_create(lvs, "lvol2", 1, false, lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ lvol2 = g_lvol;
+
+ spdk_lvol_close(lvol, close_cb, NULL);
+ spdk_lvol_destroy(lvol, lvol_op_complete, NULL);
+
+ g_lvserrno = -1;
+ g_lvol = NULL;
+ rc = spdk_lvol_create(lvs, "lvol", 1, false, lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ lvol = g_lvol;
+
+ spdk_lvol_close(lvol, close_cb, NULL);
+ spdk_lvol_destroy(lvol, destroy_cb, NULL);
+
+ spdk_lvol_close(lvol2, close_cb, NULL);
+ spdk_lvol_destroy(lvol2, destroy_cb, NULL);
+
+ /* Simulate creating two lvols with same name simultaneously. */
+ lvol = calloc(1, sizeof(*lvol));
+ SPDK_CU_ASSERT_FATAL(lvol != NULL);
+ snprintf(lvol->name, sizeof(lvol->name), "tmp_name");
+ TAILQ_INSERT_TAIL(&lvs->pending_lvols, lvol, link);
+ rc = spdk_lvol_create(lvs, "tmp_name", 1, false, lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == -EEXIST);
+
+ /* Remove name from temporary list and try again. */
+ TAILQ_REMOVE(&lvs->pending_lvols, lvol, link);
+ free(lvol);
+
+ rc = spdk_lvol_create(lvs, "tmp_name", 1, false, lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ lvol = g_lvol;
+
+ spdk_lvol_close(lvol, close_cb, NULL);
+ spdk_lvol_destroy(lvol, destroy_cb, NULL);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_destroy(lvs, lvol_store_op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ spdk_free_thread();
+}
+
+static void
+lvol_rename(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ struct spdk_lvol_store *lvs;
+ struct spdk_lvol *lvol, *lvol2;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_allocate_thread(_lvol_send_msg, NULL, NULL, NULL, NULL);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_lvserrno = -1;
+ g_lvol_store = NULL;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ lvs = g_lvol_store;
+
+ /* Trying to create new lvol */
+ g_lvserrno = -1;
+ rc = spdk_lvol_create(lvs, "lvol", 1, false, lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ lvol = g_lvol;
+
+ /* Trying to create second lvol with existing lvol name */
+ g_lvserrno = -1;
+ g_lvol = NULL;
+ rc = spdk_lvol_create(lvs, "lvol", 1, false, lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == -EEXIST);
+ CU_ASSERT(g_lvserrno == -1);
+ SPDK_CU_ASSERT_FATAL(g_lvol == NULL);
+
+ /* Trying to create second lvol with non existing name */
+ g_lvserrno = -1;
+ rc = spdk_lvol_create(lvs, "lvol2", 1, false, lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ lvol2 = g_lvol;
+
+ /* Trying to rename lvol with not existing name */
+ spdk_lvol_rename(lvol, "lvol_new", lvol_op_complete, NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+ CU_ASSERT_STRING_EQUAL(lvol->name, "lvol_new");
+
+ /* Trying to rename lvol with other lvol name */
+ spdk_lvol_rename(lvol2, "lvol_new", lvol_op_complete, NULL);
+ CU_ASSERT(g_lvolerrno == -EEXIST);
+ CU_ASSERT_STRING_NOT_EQUAL(lvol2->name, "lvol_new");
+
+ spdk_lvol_close(lvol, close_cb, NULL);
+ spdk_lvol_destroy(lvol, lvol_op_complete, NULL);
+
+ spdk_lvol_close(lvol2, close_cb, NULL);
+ spdk_lvol_destroy(lvol2, lvol_op_complete, NULL);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_destroy(lvs, lvol_store_op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ spdk_free_thread();
+}
+
+static void
+lvs_rename(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ struct spdk_lvol_store *lvs, *lvs2;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_allocate_thread(_lvol_send_msg, NULL, NULL, NULL, NULL);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+ g_lvserrno = -1;
+ g_lvol_store = NULL;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ lvs = g_lvol_store;
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "unimportant_lvs_name");
+ g_lvserrno = -1;
+ g_lvol_store = NULL;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+ lvs2 = g_lvol_store;
+
+ /* Trying to rename lvs with new name */
+ spdk_lvs_rename(lvs, "new_lvs_name", lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT_STRING_EQUAL(lvs->name, "new_lvs_name");
+
+ /* Trying to rename lvs with name lvs already has */
+ spdk_lvs_rename(lvs, "new_lvs_name", lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ CU_ASSERT_STRING_EQUAL(lvs->name, "new_lvs_name");
+
+ /* Trying to rename lvs with name already existing */
+ spdk_lvs_rename(lvs2, "new_lvs_name", lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == -EEXIST);
+ CU_ASSERT_STRING_EQUAL(lvs2->name, "unimportant_lvs_name");
+
+ /* Trying to rename lvs with another rename process started with the same name */
+ /* Simulate renaming process in progress */
+ snprintf(lvs2->new_name, sizeof(lvs2->new_name), "another_new_lvs_name");
+ CU_ASSERT_STRING_EQUAL(lvs2->new_name, "another_new_lvs_name");
+ /* Start second process */
+ spdk_lvs_rename(lvs, "another_new_lvs_name", lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno == -EEXIST);
+ CU_ASSERT_STRING_EQUAL(lvs->name, "new_lvs_name");
+ /* reverting lvs2 new name to proper value */
+ snprintf(lvs2->new_name, sizeof(lvs2->new_name), "unimportant_lvs_name");
+ CU_ASSERT_STRING_EQUAL(lvs2->new_name, "unimportant_lvs_name");
+
+ /* Simulate error while lvs rename */
+ g_lvs_rename_blob_open_error = true;
+ spdk_lvs_rename(lvs, "complete_new_lvs_name", lvol_store_op_complete, NULL);
+ CU_ASSERT(g_lvserrno != 0);
+ CU_ASSERT_STRING_EQUAL(lvs->name, "new_lvs_name");
+ CU_ASSERT_STRING_EQUAL(lvs->new_name, "new_lvs_name");
+ g_lvs_rename_blob_open_error = false;
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_destroy(lvs, lvol_store_op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_destroy(lvs2, lvol_store_op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ spdk_free_thread();
+}
+static void lvol_refcnt(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ struct spdk_lvol *lvol;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_allocate_thread(_lvol_send_msg, NULL, NULL, NULL, NULL);
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, false, lvol_op_with_handle_complete, NULL);
+
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ CU_ASSERT(g_lvol->ref_count == 1);
+
+ lvol = g_lvol;
+ spdk_lvol_open(g_lvol, lvol_op_with_handle_complete, NULL);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+ CU_ASSERT(lvol->ref_count == 2);
+
+ /* Trying to destroy lvol while its open should fail */
+ spdk_lvol_destroy(lvol, lvol_op_complete, NULL);
+ CU_ASSERT(g_lvolerrno != 0);
+
+ spdk_lvol_close(lvol, lvol_op_complete, NULL);
+ CU_ASSERT(lvol->ref_count == 1);
+ CU_ASSERT(g_lvolerrno == 0);
+
+ spdk_lvol_close(lvol, lvol_op_complete, NULL);
+ CU_ASSERT(lvol->ref_count == 0);
+ CU_ASSERT(g_lvolerrno == 0);
+
+ /* Try to close already closed lvol */
+ spdk_lvol_close(lvol, lvol_op_complete, NULL);
+ CU_ASSERT(lvol->ref_count == 0);
+ CU_ASSERT(g_lvolerrno != 0);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(g_lvol_store, lvol_store_op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ free_dev(&dev);
+
+ spdk_free_thread();
+}
+
+static void
+lvol_create_thin_provisioned(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_allocate_thread(_lvol_send_msg, NULL, NULL, NULL, NULL);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, false, lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ CU_ASSERT(g_lvol->blob->thin_provisioned == false);
+
+ spdk_lvol_close(g_lvol, close_cb, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ spdk_lvol_destroy(g_lvol, destroy_cb, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, true, lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ CU_ASSERT(g_lvol->blob->thin_provisioned == true);
+
+ spdk_lvol_close(g_lvol, close_cb, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ spdk_lvol_destroy(g_lvol, destroy_cb, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(g_lvol_store, lvol_store_op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ free_dev(&dev);
+
+ spdk_free_thread();
+}
+
+static void
+lvol_inflate(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_allocate_thread(_lvol_send_msg, NULL, NULL, NULL, NULL);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, false, lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ g_inflate_rc = -1;
+ spdk_lvol_inflate(g_lvol, lvol_op_complete, NULL);
+ CU_ASSERT(g_lvolerrno != 0);
+
+ g_inflate_rc = 0;
+ spdk_lvol_inflate(g_lvol, lvol_op_complete, NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+
+ spdk_lvol_close(g_lvol, close_cb, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ spdk_lvol_destroy(g_lvol, destroy_cb, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(g_lvol_store, lvol_store_op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ free_dev(&dev);
+
+ /* Make sure that all references to the io_channel was closed after
+ * inflate call
+ */
+ CU_ASSERT(g_io_channel == NULL);
+
+ spdk_free_thread();
+}
+
+static void
+lvol_decouple_parent(void)
+{
+ struct lvol_ut_bs_dev dev;
+ struct spdk_lvs_opts opts;
+ int rc = 0;
+
+ init_dev(&dev);
+
+ spdk_allocate_thread(_lvol_send_msg, NULL, NULL, NULL, NULL);
+
+ spdk_lvs_opts_init(&opts);
+ snprintf(opts.name, sizeof(opts.name), "lvs");
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_init(&dev.bs_dev, &opts, lvol_store_op_with_handle_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol_store != NULL);
+
+ spdk_lvol_create(g_lvol_store, "lvol", 10, false, lvol_op_with_handle_complete, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ SPDK_CU_ASSERT_FATAL(g_lvol != NULL);
+
+ g_inflate_rc = -1;
+ spdk_lvol_decouple_parent(g_lvol, lvol_op_complete, NULL);
+ CU_ASSERT(g_lvolerrno != 0);
+
+ g_inflate_rc = 0;
+ spdk_lvol_decouple_parent(g_lvol, lvol_op_complete, NULL);
+ CU_ASSERT(g_lvolerrno == 0);
+
+ spdk_lvol_close(g_lvol, close_cb, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+ spdk_lvol_destroy(g_lvol, destroy_cb, NULL);
+ CU_ASSERT(g_lvserrno == 0);
+
+ g_lvserrno = -1;
+ rc = spdk_lvs_unload(g_lvol_store, lvol_store_op_complete, NULL);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_lvserrno == 0);
+ g_lvol_store = NULL;
+
+ free_dev(&dev);
+
+ /* Make sure that all references to the io_channel was closed after
+ * inflate call
+ */
+ CU_ASSERT(g_io_channel == NULL);
+
+ spdk_free_thread();
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("lvol", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "lvs_init_unload_success", lvs_init_unload_success) == NULL ||
+ CU_add_test(suite, "lvs_init_destroy_success", lvs_init_destroy_success) == NULL ||
+ CU_add_test(suite, "lvs_init_opts_success", lvs_init_opts_success) == NULL ||
+ CU_add_test(suite, "lvs_unload_lvs_is_null_fail", lvs_unload_lvs_is_null_fail) == NULL ||
+ CU_add_test(suite, "lvs_names", lvs_names) == NULL ||
+ CU_add_test(suite, "lvol_create_destroy_success", lvol_create_destroy_success) == NULL ||
+ CU_add_test(suite, "lvol_create_fail", lvol_create_fail) == NULL ||
+ CU_add_test(suite, "lvol_destroy_fail", lvol_destroy_fail) == NULL ||
+ CU_add_test(suite, "lvol_close_fail", lvol_close_fail) == NULL ||
+ CU_add_test(suite, "lvol_close_success", lvol_close_success) == NULL ||
+ CU_add_test(suite, "lvol_resize", lvol_resize) == NULL ||
+ CU_add_test(suite, "lvs_load", lvs_load) == NULL ||
+ CU_add_test(suite, "lvols_load", lvols_load) == NULL ||
+ CU_add_test(suite, "lvol_open", lvol_open) == NULL ||
+ CU_add_test(suite, "lvol_load", lvs_load) == NULL ||
+ CU_add_test(suite, "lvs_load", lvols_load) == NULL ||
+ CU_add_test(suite, "lvol_open", lvol_open) == NULL ||
+ CU_add_test(suite, "lvol_snapshot", lvol_snapshot) == NULL ||
+ CU_add_test(suite, "lvol_snapshot_fail", lvol_snapshot_fail) == NULL ||
+ CU_add_test(suite, "lvol_clone", lvol_clone) == NULL ||
+ CU_add_test(suite, "lvol_clone_fail", lvol_clone_fail) == NULL ||
+ CU_add_test(suite, "lvol_refcnt", lvol_refcnt) == NULL ||
+ CU_add_test(suite, "lvol_names", lvol_names) == NULL ||
+ CU_add_test(suite, "lvol_create_thin_provisioned", lvol_create_thin_provisioned) == NULL ||
+ CU_add_test(suite, "lvol_rename", lvol_rename) == NULL ||
+ CU_add_test(suite, "lvs_rename", lvs_rename) == NULL ||
+ CU_add_test(suite, "lvol_inflate", lvol_inflate) == NULL ||
+ CU_add_test(suite, "lvol_decouple_parent", lvol_decouple_parent) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/Makefile b/src/spdk/test/unit/lib/nvme/Makefile
new file mode 100644
index 00000000..fb17a2d0
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/Makefile
@@ -0,0 +1,47 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = nvme.c nvme_ctrlr.c nvme_ctrlr_cmd.c nvme_ctrlr_ocssd_cmd.c nvme_ns.c nvme_ns_cmd.c nvme_ns_ocssd_cmd.c nvme_pcie.c nvme_qpair.c \
+ nvme_quirks.c \
+
+DIRS-$(CONFIG_RDMA) += nvme_rdma.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme.c/.gitignore
new file mode 100644
index 00000000..90c0c167
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme.c/.gitignore
@@ -0,0 +1 @@
+nvme_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme.c/Makefile
new file mode 100644
index 00000000..4202cf54
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme.c/nvme_ut.c b/src/spdk/test/unit/lib/nvme/nvme.c/nvme_ut.c
new file mode 100644
index 00000000..6925a2cf
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme.c/nvme_ut.c
@@ -0,0 +1,1135 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "spdk/env.h"
+
+#include "nvme/nvme.c"
+
+#include "spdk_internal/mock.h"
+
+#include "common/lib/test_env.c"
+
+DEFINE_STUB_V(nvme_ctrlr_fail,
+ (struct spdk_nvme_ctrlr *ctrlr, bool hot_remove))
+
+DEFINE_STUB_V(nvme_ctrlr_proc_get_ref, (struct spdk_nvme_ctrlr *ctrlr))
+
+DEFINE_STUB_V(nvme_ctrlr_proc_put_ref, (struct spdk_nvme_ctrlr *ctrlr))
+
+DEFINE_STUB(spdk_pci_nvme_enumerate, int,
+ (spdk_pci_enum_cb enum_cb, void *enum_ctx), -1)
+
+DEFINE_STUB(spdk_pci_device_get_id, struct spdk_pci_id,
+ (struct spdk_pci_device *pci_dev),
+ MOCK_STRUCT_INIT(.vendor_id = 0xffff, .device_id = 0xffff,
+ .subvendor_id = 0xffff, .subdevice_id = 0xffff))
+
+DEFINE_STUB(spdk_nvme_transport_available, bool,
+ (enum spdk_nvme_transport_type trtype), true)
+
+DEFINE_STUB(nvme_ctrlr_add_process, int,
+ (struct spdk_nvme_ctrlr *ctrlr, void *devhandle), 0)
+
+DEFINE_STUB(nvme_ctrlr_process_init, int,
+ (struct spdk_nvme_ctrlr *ctrlr), 0)
+
+DEFINE_STUB(spdk_pci_device_get_addr, struct spdk_pci_addr,
+ (struct spdk_pci_device *pci_dev), {0})
+
+DEFINE_STUB(nvme_ctrlr_get_ref_count, int,
+ (struct spdk_nvme_ctrlr *ctrlr), 0)
+
+DEFINE_STUB(dummy_probe_cb, bool,
+ (void *cb_ctx, const struct spdk_nvme_transport_id *trid,
+ struct spdk_nvme_ctrlr_opts *opts), false)
+
+DEFINE_STUB(nvme_transport_ctrlr_construct, struct spdk_nvme_ctrlr *,
+ (const struct spdk_nvme_transport_id *trid,
+ const struct spdk_nvme_ctrlr_opts *opts,
+ void *devhandle), NULL)
+
+DEFINE_STUB(spdk_nvme_qpair_process_completions, int32_t,
+ (struct spdk_nvme_qpair *qpair,
+ uint32_t max_completions), 0);
+
+static bool ut_destruct_called = false;
+void
+nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
+{
+ ut_destruct_called = true;
+}
+
+void
+spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
+{
+ memset(opts, 0, sizeof(*opts));
+}
+
+static void
+memset_trid(struct spdk_nvme_transport_id *trid1, struct spdk_nvme_transport_id *trid2)
+{
+ memset(trid1, 0, sizeof(struct spdk_nvme_transport_id));
+ memset(trid2, 0, sizeof(struct spdk_nvme_transport_id));
+}
+
+static bool ut_check_trtype = false;
+int
+nvme_transport_ctrlr_scan(const struct spdk_nvme_transport_id *trid,
+ void *cb_ctx,
+ spdk_nvme_probe_cb probe_cb,
+ spdk_nvme_remove_cb remove_cb,
+ bool direct_connect)
+{
+ struct spdk_nvme_ctrlr *ctrlr = NULL;
+
+ if (ut_check_trtype == true) {
+ CU_ASSERT(trid->trtype == SPDK_NVME_TRANSPORT_PCIE);
+ }
+
+ if (direct_connect == true && probe_cb) {
+ nvme_robust_mutex_unlock(&g_spdk_nvme_driver->lock);
+ ctrlr = spdk_nvme_get_ctrlr_by_trid(trid);
+ nvme_robust_mutex_lock(&g_spdk_nvme_driver->lock);
+ probe_cb(cb_ctx, trid, &ctrlr->opts);
+ }
+ return 0;
+}
+
+static bool ut_attach_cb_called = false;
+static void
+dummy_attach_cb(void *cb_ctx, const struct spdk_nvme_transport_id *trid,
+ struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_ctrlr_opts *opts)
+{
+ ut_attach_cb_called = true;
+}
+
+static void
+test_spdk_nvme_probe(void)
+{
+ int rc = 0;
+ const struct spdk_nvme_transport_id *trid = NULL;
+ void *cb_ctx = NULL;
+ spdk_nvme_probe_cb probe_cb = NULL;
+ spdk_nvme_attach_cb attach_cb = dummy_attach_cb;
+ spdk_nvme_remove_cb remove_cb = NULL;
+ struct spdk_nvme_ctrlr ctrlr;
+ pthread_mutexattr_t attr;
+ struct nvme_driver dummy;
+ g_spdk_nvme_driver = &dummy;
+
+ /* driver init fails */
+ MOCK_SET(spdk_process_is_primary, false);
+ MOCK_SET(spdk_memzone_lookup, NULL);
+ rc = spdk_nvme_probe(trid, cb_ctx, probe_cb, attach_cb, remove_cb);
+ CU_ASSERT(rc == -1);
+
+ /*
+ * For secondary processes, the attach_cb should automatically get
+ * called for any controllers already initialized by the primary
+ * process.
+ */
+ MOCK_SET(spdk_nvme_transport_available, false);
+ MOCK_SET(spdk_process_is_primary, true);
+ dummy.initialized = true;
+ g_spdk_nvme_driver = &dummy;
+ rc = spdk_nvme_probe(trid, cb_ctx, probe_cb, attach_cb, remove_cb);
+ CU_ASSERT(rc == -1);
+
+ /* driver init passes, transport available, secondary call attach_cb */
+ MOCK_SET(spdk_nvme_transport_available, true);
+ MOCK_SET(spdk_process_is_primary, false);
+ MOCK_SET(spdk_memzone_lookup, g_spdk_nvme_driver);
+ dummy.initialized = true;
+ memset(&ctrlr, 0, sizeof(struct spdk_nvme_ctrlr));
+ CU_ASSERT(pthread_mutexattr_init(&attr) == 0);
+ CU_ASSERT(pthread_mutex_init(&dummy.lock, &attr) == 0);
+ TAILQ_INIT(&dummy.shared_attached_ctrlrs);
+ TAILQ_INSERT_TAIL(&dummy.shared_attached_ctrlrs, &ctrlr, tailq);
+ ut_attach_cb_called = false;
+ /* setup nvme_transport_ctrlr_scan() stub to also check the trype */
+ ut_check_trtype = true;
+ rc = spdk_nvme_probe(trid, cb_ctx, probe_cb, attach_cb, remove_cb);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(ut_attach_cb_called == true);
+
+ /* driver init passes, transport available, we are primary */
+ MOCK_SET(spdk_process_is_primary, true);
+ TAILQ_INIT(&g_nvme_init_ctrlrs);
+ rc = spdk_nvme_probe(trid, cb_ctx, probe_cb, attach_cb, remove_cb);
+ CU_ASSERT(rc == 0);
+
+ g_spdk_nvme_driver = NULL;
+ /* reset to pre-test values */
+ MOCK_CLEAR(spdk_memzone_lookup);
+ ut_check_trtype = false;
+
+ pthread_mutex_destroy(&dummy.lock);
+ pthread_mutexattr_destroy(&attr);
+}
+
+static void
+test_spdk_nvme_connect(void)
+{
+ struct spdk_nvme_ctrlr *ret_ctrlr = NULL;
+ struct spdk_nvme_transport_id trid = {};
+ struct spdk_nvme_ctrlr_opts opts = {};
+ struct spdk_nvme_ctrlr ctrlr;
+ pthread_mutexattr_t attr;
+ struct nvme_driver dummy;
+
+ /* initialize the variable to prepare the test */
+ dummy.initialized = true;
+ TAILQ_INIT(&dummy.shared_attached_ctrlrs);
+ g_spdk_nvme_driver = &dummy;
+ CU_ASSERT(pthread_mutexattr_init(&attr) == 0);
+ CU_ASSERT(pthread_mutex_init(&g_spdk_nvme_driver->lock, &attr) == 0);
+
+ /* set NULL trid pointer to test immediate return */
+ ret_ctrlr = spdk_nvme_connect(NULL, NULL, 0);
+ CU_ASSERT(ret_ctrlr == NULL);
+
+ /* driver init passes, transport available, secondary process connects ctrlr */
+ MOCK_SET(spdk_process_is_primary, false);
+ MOCK_SET(spdk_memzone_lookup, g_spdk_nvme_driver);
+ MOCK_SET(spdk_nvme_transport_available, true);
+ memset(&trid, 0, sizeof(trid));
+ trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
+ ret_ctrlr = spdk_nvme_connect(&trid, NULL, 0);
+ CU_ASSERT(ret_ctrlr == NULL);
+
+ /* driver init passes, setup one ctrlr on the attached_list */
+ memset(&ctrlr, 0, sizeof(struct spdk_nvme_ctrlr));
+ snprintf(ctrlr.trid.traddr, sizeof(ctrlr.trid.traddr), "0000:01:00.0");
+ ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_driver->shared_attached_ctrlrs, &ctrlr, tailq);
+ /* get the ctrlr from the attached list */
+ snprintf(trid.traddr, sizeof(trid.traddr), "0000:01:00.0");
+ ret_ctrlr = spdk_nvme_connect(&trid, NULL, 0);
+ CU_ASSERT(ret_ctrlr == &ctrlr);
+ /* get the ctrlr from the attached list with default ctrlr opts */
+ ctrlr.opts.num_io_queues = DEFAULT_MAX_IO_QUEUES;
+ ret_ctrlr = spdk_nvme_connect(&trid, NULL, 0);
+ CU_ASSERT(ret_ctrlr == &ctrlr);
+ CU_ASSERT_EQUAL(ret_ctrlr->opts.num_io_queues, DEFAULT_MAX_IO_QUEUES);
+ /* get the ctrlr from the attached list with default ctrlr opts and consistent opts_size */
+ opts.num_io_queues = 1;
+ ret_ctrlr = spdk_nvme_connect(&trid, &opts, sizeof(opts));
+ CU_ASSERT(ret_ctrlr == &ctrlr);
+ CU_ASSERT_EQUAL(ret_ctrlr->opts.num_io_queues, 1);
+ /* remove the attached ctrlr on the attached_list */
+ CU_ASSERT(spdk_nvme_detach(&ctrlr) == 0);
+ CU_ASSERT(TAILQ_EMPTY(&g_spdk_nvme_driver->shared_attached_ctrlrs));
+
+ /* driver init passes, transport available, primary process connects ctrlr */
+ MOCK_SET(spdk_process_is_primary, true);
+ /* setup one ctrlr on the attached_list */
+ memset(&ctrlr, 0, sizeof(struct spdk_nvme_ctrlr));
+ snprintf(ctrlr.trid.traddr, sizeof(ctrlr.trid.traddr), "0000:02:00.0");
+ ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
+ TAILQ_INSERT_TAIL(&g_spdk_nvme_driver->shared_attached_ctrlrs, &ctrlr, tailq);
+ /* get the ctrlr from the attached list */
+ snprintf(trid.traddr, sizeof(trid.traddr), "0000:02:00.0");
+ ret_ctrlr = spdk_nvme_connect(&trid, NULL, 0);
+ CU_ASSERT(ret_ctrlr == &ctrlr);
+ /* get the ctrlr from the attached list with default ctrlr opts */
+ ctrlr.opts.num_io_queues = DEFAULT_MAX_IO_QUEUES;
+ ret_ctrlr = spdk_nvme_connect(&trid, NULL, 0);
+ CU_ASSERT(ret_ctrlr == &ctrlr);
+ CU_ASSERT_EQUAL(ret_ctrlr->opts.num_io_queues, DEFAULT_MAX_IO_QUEUES);
+ /* get the ctrlr from the attached list with default ctrlr opts and consistent opts_size */
+ opts.num_io_queues = 2;
+ ret_ctrlr = spdk_nvme_connect(&trid, &opts, sizeof(opts));
+ CU_ASSERT(ret_ctrlr == &ctrlr);
+ CU_ASSERT_EQUAL(ret_ctrlr->opts.num_io_queues, 2);
+ /* remove the attached ctrlr on the attached_list */
+ CU_ASSERT(spdk_nvme_detach(ret_ctrlr) == 0);
+ CU_ASSERT(TAILQ_EMPTY(&g_spdk_nvme_driver->shared_attached_ctrlrs));
+
+ /* test driver init failure return */
+ MOCK_SET(spdk_process_is_primary, false);
+ MOCK_SET(spdk_memzone_lookup, NULL);
+ ret_ctrlr = spdk_nvme_connect(&trid, NULL, 0);
+ CU_ASSERT(ret_ctrlr == NULL);
+}
+
+static void
+test_nvme_init_controllers(void)
+{
+ int rc = 0;
+ struct nvme_driver test_driver;
+ void *cb_ctx = NULL;
+ spdk_nvme_attach_cb attach_cb = dummy_attach_cb;
+ struct spdk_nvme_ctrlr ctrlr;
+ pthread_mutexattr_t attr;
+
+ g_spdk_nvme_driver = &test_driver;
+ memset(&ctrlr, 0, sizeof(struct spdk_nvme_ctrlr));
+ ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
+ CU_ASSERT(pthread_mutexattr_init(&attr) == 0);
+ CU_ASSERT(pthread_mutex_init(&test_driver.lock, &attr) == 0);
+ TAILQ_INIT(&g_nvme_init_ctrlrs);
+ TAILQ_INSERT_TAIL(&g_nvme_init_ctrlrs, &ctrlr, tailq);
+ TAILQ_INIT(&test_driver.shared_attached_ctrlrs);
+
+ /*
+ * Try to initialize, but nvme_ctrlr_process_init will fail.
+ * Verify correct behavior when it does.
+ */
+ MOCK_SET(nvme_ctrlr_process_init, 1);
+ g_spdk_nvme_driver->initialized = false;
+ ut_destruct_called = false;
+ rc = nvme_init_controllers(cb_ctx, attach_cb);
+ CU_ASSERT(rc == -1);
+ CU_ASSERT(g_spdk_nvme_driver->initialized == true);
+ CU_ASSERT(TAILQ_EMPTY(&g_nvme_init_ctrlrs));
+ CU_ASSERT(ut_destruct_called == true);
+
+ /*
+ * Controller init OK, need to move the controller state machine
+ * forward by setting the ctrl state so that it can be moved
+ * the shared_attached_ctrlrs list.
+ */
+ TAILQ_INSERT_TAIL(&g_nvme_init_ctrlrs, &ctrlr, tailq);
+ ctrlr.state = NVME_CTRLR_STATE_READY;
+ MOCK_SET(nvme_ctrlr_process_init, 0);
+ rc = nvme_init_controllers(cb_ctx, attach_cb);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(ut_attach_cb_called == true);
+ CU_ASSERT(TAILQ_EMPTY(&g_nvme_init_ctrlrs));
+ CU_ASSERT(TAILQ_EMPTY(&g_nvme_attached_ctrlrs));
+ CU_ASSERT(TAILQ_FIRST(&g_spdk_nvme_driver->shared_attached_ctrlrs) == &ctrlr);
+ TAILQ_REMOVE(&g_spdk_nvme_driver->shared_attached_ctrlrs, &ctrlr, tailq);
+
+ /*
+ * Non-PCIe controllers should be added to the per-process list, not the shared list.
+ */
+ memset(&ctrlr, 0, sizeof(struct spdk_nvme_ctrlr));
+ ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
+ TAILQ_INSERT_TAIL(&g_nvme_init_ctrlrs, &ctrlr, tailq);
+ ctrlr.state = NVME_CTRLR_STATE_READY;
+ MOCK_SET(nvme_ctrlr_process_init, 0);
+ rc = nvme_init_controllers(cb_ctx, attach_cb);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(ut_attach_cb_called == true);
+ CU_ASSERT(TAILQ_EMPTY(&g_nvme_init_ctrlrs));
+ CU_ASSERT(TAILQ_EMPTY(&g_spdk_nvme_driver->shared_attached_ctrlrs));
+ CU_ASSERT(TAILQ_FIRST(&g_nvme_attached_ctrlrs) == &ctrlr);
+ TAILQ_REMOVE(&g_nvme_attached_ctrlrs, &ctrlr, tailq);
+
+ g_spdk_nvme_driver = NULL;
+ pthread_mutexattr_destroy(&attr);
+ pthread_mutex_destroy(&test_driver.lock);
+}
+
+static void
+test_nvme_driver_init(void)
+{
+ int rc;
+ struct nvme_driver dummy;
+ g_spdk_nvme_driver = &dummy;
+
+ /* adjust this so testing doesn't take so long */
+ g_nvme_driver_timeout_ms = 100;
+
+ /* process is primary and mem already reserved */
+ MOCK_SET(spdk_process_is_primary, true);
+ dummy.initialized = true;
+ rc = nvme_driver_init();
+ CU_ASSERT(rc == 0);
+
+ /*
+ * Process is primary and mem not yet reserved but the call
+ * to spdk_memzone_reserve() returns NULL.
+ */
+ g_spdk_nvme_driver = NULL;
+ MOCK_SET(spdk_process_is_primary, true);
+ MOCK_SET(spdk_memzone_reserve, NULL);
+ rc = nvme_driver_init();
+ CU_ASSERT(rc == -1);
+
+ /* process is not primary, no mem already reserved */
+ MOCK_SET(spdk_process_is_primary, false);
+ MOCK_SET(spdk_memzone_lookup, NULL);
+ g_spdk_nvme_driver = NULL;
+ rc = nvme_driver_init();
+ CU_ASSERT(rc == -1);
+
+ /* process is not primary, mem is already reserved & init'd */
+ MOCK_SET(spdk_process_is_primary, false);
+ MOCK_SET(spdk_memzone_lookup, (void *)&dummy);
+ dummy.initialized = true;
+ rc = nvme_driver_init();
+ CU_ASSERT(rc == 0);
+
+ /* process is not primary, mem is reserved but not initialized */
+ /* and times out */
+ MOCK_SET(spdk_process_is_primary, false);
+ MOCK_SET(spdk_memzone_reserve, (void *)&dummy);
+ dummy.initialized = false;
+ rc = nvme_driver_init();
+ CU_ASSERT(rc == -1);
+
+ /* process is primary, got mem but mutex won't init */
+ MOCK_SET(spdk_process_is_primary, true);
+ MOCK_SET(spdk_memzone_reserve, (void *)&dummy);
+ MOCK_SET(pthread_mutexattr_init, -1);
+ g_spdk_nvme_driver = NULL;
+ dummy.initialized = true;
+ rc = nvme_driver_init();
+ /* for FreeBSD we can't can't effectively mock this path */
+#ifndef __FreeBSD__
+ CU_ASSERT(rc != 0);
+#else
+ CU_ASSERT(rc == 0);
+#endif
+
+ /* process is primary, got mem, mutex OK */
+ MOCK_SET(spdk_process_is_primary, true);
+ MOCK_CLEAR(pthread_mutexattr_init);
+ g_spdk_nvme_driver = NULL;
+ rc = nvme_driver_init();
+ CU_ASSERT(g_spdk_nvme_driver->initialized == false);
+ CU_ASSERT(TAILQ_EMPTY(&g_nvme_init_ctrlrs));
+ CU_ASSERT(TAILQ_EMPTY(&g_spdk_nvme_driver->shared_attached_ctrlrs));
+ CU_ASSERT(rc == 0);
+
+ g_spdk_nvme_driver = NULL;
+ MOCK_CLEAR(spdk_memzone_reserve);
+ MOCK_CLEAR(spdk_memzone_lookup);
+}
+
+static void
+test_spdk_nvme_detach(void)
+{
+ int rc = 1;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_ctrlr *ret_ctrlr;
+ struct nvme_driver test_driver;
+
+ memset(&ctrlr, 0, sizeof(ctrlr));
+ ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
+
+ g_spdk_nvme_driver = &test_driver;
+ TAILQ_INIT(&test_driver.shared_attached_ctrlrs);
+ TAILQ_INSERT_TAIL(&test_driver.shared_attached_ctrlrs, &ctrlr, tailq);
+ CU_ASSERT(pthread_mutex_init(&test_driver.lock, NULL) == 0);
+
+ /*
+ * Controllers are ref counted so mock the function that returns
+ * the ref count so that detach will actually call the destruct
+ * function which we've mocked simply to verify that it gets
+ * called (we aren't testing what the real destruct function does
+ * here.)
+ */
+ MOCK_SET(nvme_ctrlr_get_ref_count, 0);
+ rc = spdk_nvme_detach(&ctrlr);
+ ret_ctrlr = TAILQ_FIRST(&test_driver.shared_attached_ctrlrs);
+ CU_ASSERT(ret_ctrlr == NULL);
+ CU_ASSERT(ut_destruct_called == true);
+ CU_ASSERT(rc == 0);
+
+ /*
+ * Mock the ref count to 1 so we confirm that the destruct
+ * function is not called and that attached ctrl list is
+ * not empty.
+ */
+ MOCK_SET(nvme_ctrlr_get_ref_count, 1);
+ TAILQ_INSERT_TAIL(&test_driver.shared_attached_ctrlrs, &ctrlr, tailq);
+ ut_destruct_called = false;
+ rc = spdk_nvme_detach(&ctrlr);
+ ret_ctrlr = TAILQ_FIRST(&test_driver.shared_attached_ctrlrs);
+ CU_ASSERT(ret_ctrlr != NULL);
+ CU_ASSERT(ut_destruct_called == false);
+ CU_ASSERT(rc == 0);
+
+ /*
+ * Non-PCIe controllers should be on the per-process attached_ctrlrs list, not the
+ * shared_attached_ctrlrs list. Test an RDMA controller and ensure it is removed
+ * from the correct list.
+ */
+ memset(&ctrlr, 0, sizeof(ctrlr));
+ ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
+ TAILQ_INIT(&g_nvme_attached_ctrlrs);
+ TAILQ_INSERT_TAIL(&g_nvme_attached_ctrlrs, &ctrlr, tailq);
+ MOCK_SET(nvme_ctrlr_get_ref_count, 0);
+ rc = spdk_nvme_detach(&ctrlr);
+ CU_ASSERT(TAILQ_EMPTY(&g_nvme_attached_ctrlrs));
+ CU_ASSERT(ut_destruct_called == true);
+ CU_ASSERT(rc == 0);
+
+ g_spdk_nvme_driver = NULL;
+ pthread_mutex_destroy(&test_driver.lock);
+}
+
+static void
+test_nvme_completion_poll_cb(void)
+{
+ struct nvme_completion_poll_status status;
+ struct spdk_nvme_cpl cpl;
+
+ memset(&status, 0x0, sizeof(status));
+ memset(&cpl, 0xff, sizeof(cpl));
+
+ nvme_completion_poll_cb(&status, &cpl);
+ CU_ASSERT(status.done == true);
+ CU_ASSERT(memcmp(&cpl, &status.cpl,
+ sizeof(struct spdk_nvme_cpl)) == 0);
+}
+
+/* stub callback used by test_nvme_user_copy_cmd_complete() */
+static struct spdk_nvme_cpl ut_spdk_nvme_cpl = {0};
+static void
+dummy_cb(void *user_cb_arg, struct spdk_nvme_cpl *cpl)
+{
+ ut_spdk_nvme_cpl = *cpl;
+}
+
+static void
+test_nvme_user_copy_cmd_complete(void)
+{
+ struct nvme_request req;
+ int test_data = 0xdeadbeef;
+ int buff_size = sizeof(int);
+ void *buff;
+ static struct spdk_nvme_cpl cpl;
+
+ memset(&req, 0, sizeof(req));
+ memset(&cpl, 0x5a, sizeof(cpl));
+
+ /* test without a user buffer provided */
+ req.user_cb_fn = (void *)dummy_cb;
+ nvme_user_copy_cmd_complete(&req, &cpl);
+ CU_ASSERT(memcmp(&ut_spdk_nvme_cpl, &cpl, sizeof(cpl)) == 0);
+
+ /* test with a user buffer provided */
+ req.user_buffer = malloc(buff_size);
+ SPDK_CU_ASSERT_FATAL(req.user_buffer != NULL);
+ memset(req.user_buffer, 0, buff_size);
+ req.payload_size = buff_size;
+ buff = spdk_dma_zmalloc(buff_size, 0x100, NULL);
+ SPDK_CU_ASSERT_FATAL(buff != NULL);
+ req.payload = NVME_PAYLOAD_CONTIG(buff, NULL);
+ memcpy(buff, &test_data, buff_size);
+ req.cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
+ req.pid = getpid();
+
+ /* zero out the test value set in the callback */
+ memset(&ut_spdk_nvme_cpl, 0, sizeof(ut_spdk_nvme_cpl));
+
+ nvme_user_copy_cmd_complete(&req, &cpl);
+ CU_ASSERT(memcmp(req.user_buffer, &test_data, buff_size) == 0);
+ CU_ASSERT(memcmp(&ut_spdk_nvme_cpl, &cpl, sizeof(cpl)) == 0);
+
+ /*
+ * Now test the same path as above but this time choose an opc
+ * that results in a different data transfer type.
+ */
+ memset(&ut_spdk_nvme_cpl, 0, sizeof(ut_spdk_nvme_cpl));
+ memset(req.user_buffer, 0, buff_size);
+ buff = spdk_dma_zmalloc(buff_size, 0x100, NULL);
+ SPDK_CU_ASSERT_FATAL(buff != NULL);
+ req.payload = NVME_PAYLOAD_CONTIG(buff, NULL);
+ memcpy(buff, &test_data, buff_size);
+ req.cmd.opc = SPDK_NVME_OPC_SET_FEATURES;
+ nvme_user_copy_cmd_complete(&req, &cpl);
+ CU_ASSERT(memcmp(req.user_buffer, &test_data, buff_size) != 0);
+ CU_ASSERT(memcmp(&ut_spdk_nvme_cpl, &cpl, sizeof(cpl)) == 0);
+
+ /* clean up */
+ free(req.user_buffer);
+}
+
+static void
+test_nvme_allocate_request_null(void)
+{
+ struct spdk_nvme_qpair qpair;
+ spdk_nvme_cmd_cb cb_fn = (spdk_nvme_cmd_cb)0x1234;
+ void *cb_arg = (void *)0x5678;
+ struct nvme_request *req = NULL;
+ struct nvme_request dummy_req;
+
+ STAILQ_INIT(&qpair.free_req);
+ STAILQ_INIT(&qpair.queued_req);
+
+ /*
+ * Put a dummy on the queue so we can make a request
+ * and confirm that what comes back is what we expect.
+ */
+ STAILQ_INSERT_HEAD(&qpair.free_req, &dummy_req, stailq);
+
+ req = nvme_allocate_request_null(&qpair, cb_fn, cb_arg);
+
+ /*
+ * Compare the req with the parmaters that we passed in
+ * as well as what the function is supposed to update.
+ */
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+ CU_ASSERT(req->cb_fn == cb_fn);
+ CU_ASSERT(req->cb_arg == cb_arg);
+ CU_ASSERT(req->pid == getpid());
+ CU_ASSERT(nvme_payload_type(&req->payload) == NVME_PAYLOAD_TYPE_CONTIG);
+ CU_ASSERT(req->payload.md == NULL);
+ CU_ASSERT(req->payload.contig_or_cb_arg == NULL);
+}
+
+static void
+test_nvme_allocate_request(void)
+{
+ struct spdk_nvme_qpair qpair;
+ struct nvme_payload payload;
+ uint32_t payload_struct_size = sizeof(payload);
+ spdk_nvme_cmd_cb cb_fn = (spdk_nvme_cmd_cb)0x1234;
+ void *cb_arg = (void *)0x6789;
+ struct nvme_request *req = NULL;
+ struct nvme_request dummy_req;
+
+ /* Fill the whole payload struct with a known pattern */
+ memset(&payload, 0x5a, payload_struct_size);
+ STAILQ_INIT(&qpair.free_req);
+ STAILQ_INIT(&qpair.queued_req);
+
+ /* Test trying to allocate a request when no requests are available */
+ req = nvme_allocate_request(&qpair, &payload, payload_struct_size,
+ cb_fn, cb_arg);
+ CU_ASSERT(req == NULL);
+
+ /* put a dummy on the queue, and then allocate one */
+ STAILQ_INSERT_HEAD(&qpair.free_req, &dummy_req, stailq);
+ req = nvme_allocate_request(&qpair, &payload, payload_struct_size,
+ cb_fn, cb_arg);
+
+ /* all the req elements should now match the passed in paramters */
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+ CU_ASSERT(req->cb_fn == cb_fn);
+ CU_ASSERT(req->cb_arg == cb_arg);
+ CU_ASSERT(memcmp(&req->payload, &payload, payload_struct_size) == 0);
+ CU_ASSERT(req->payload_size == payload_struct_size);
+ CU_ASSERT(req->qpair == &qpair);
+ CU_ASSERT(req->pid == getpid());
+}
+
+static void
+test_nvme_free_request(void)
+{
+ struct nvme_request match_req;
+ struct spdk_nvme_qpair qpair;
+ struct nvme_request *req;
+
+ /* put a req on the Q, take it off and compare */
+ memset(&match_req.cmd, 0x5a, sizeof(struct spdk_nvme_cmd));
+ match_req.qpair = &qpair;
+ /* the code under tests asserts this condition */
+ match_req.num_children = 0;
+ STAILQ_INIT(&qpair.free_req);
+
+ nvme_free_request(&match_req);
+ req = STAILQ_FIRST(&match_req.qpair->free_req);
+ CU_ASSERT(req == &match_req);
+}
+
+static void
+test_nvme_allocate_request_user_copy(void)
+{
+ struct spdk_nvme_qpair qpair;
+ spdk_nvme_cmd_cb cb_fn = (spdk_nvme_cmd_cb)0x12345;
+ void *cb_arg = (void *)0x12345;
+ bool host_to_controller = true;
+ struct nvme_request *req;
+ struct nvme_request dummy_req;
+ int test_data = 0xdeadbeef;
+ void *buffer = NULL;
+ uint32_t payload_size = sizeof(int);
+
+ STAILQ_INIT(&qpair.free_req);
+ STAILQ_INIT(&qpair.queued_req);
+
+ /* no buffer or valid payload size, early NULL return */
+ req = nvme_allocate_request_user_copy(&qpair, buffer, payload_size, cb_fn,
+ cb_arg, host_to_controller);
+ CU_ASSERT(req == NULL);
+
+ /* good buffer and valid payload size */
+ buffer = malloc(payload_size);
+ SPDK_CU_ASSERT_FATAL(buffer != NULL);
+ memcpy(buffer, &test_data, payload_size);
+
+ /* put a dummy on the queue */
+ STAILQ_INSERT_HEAD(&qpair.free_req, &dummy_req, stailq);
+
+ MOCK_CLEAR(spdk_malloc)
+ MOCK_CLEAR(spdk_zmalloc)
+ req = nvme_allocate_request_user_copy(&qpair, buffer, payload_size, cb_fn,
+ cb_arg, host_to_controller);
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+ CU_ASSERT(req->user_cb_fn == cb_fn);
+ CU_ASSERT(req->user_cb_arg == cb_arg);
+ CU_ASSERT(req->user_buffer == buffer);
+ CU_ASSERT(req->cb_arg == req);
+ CU_ASSERT(memcmp(req->payload.contig_or_cb_arg, buffer, payload_size) == 0);
+ spdk_dma_free(req->payload.contig_or_cb_arg);
+
+ /* same thing but additional path coverage, no copy */
+ host_to_controller = false;
+ STAILQ_INSERT_HEAD(&qpair.free_req, &dummy_req, stailq);
+
+ req = nvme_allocate_request_user_copy(&qpair, buffer, payload_size, cb_fn,
+ cb_arg, host_to_controller);
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+ CU_ASSERT(req->user_cb_fn == cb_fn);
+ CU_ASSERT(req->user_cb_arg == cb_arg);
+ CU_ASSERT(req->user_buffer == buffer);
+ CU_ASSERT(req->cb_arg == req);
+ CU_ASSERT(memcmp(req->payload.contig_or_cb_arg, buffer, payload_size) != 0);
+ spdk_dma_free(req->payload.contig_or_cb_arg);
+
+ /* good buffer and valid payload size but make spdk_dma_zmalloc fail */
+ /* set the mock pointer to NULL for spdk_dma_zmalloc */
+ MOCK_SET(spdk_dma_zmalloc, NULL);
+ req = nvme_allocate_request_user_copy(&qpair, buffer, payload_size, cb_fn,
+ cb_arg, host_to_controller);
+ CU_ASSERT(req == NULL);
+ free(buffer);
+ MOCK_CLEAR(spdk_dma_zmalloc);
+}
+
+static void
+test_nvme_ctrlr_probe(void)
+{
+ int rc = 0;
+ struct spdk_nvme_ctrlr ctrlr = {};
+ const struct spdk_nvme_transport_id trid = {};
+ void *devhandle = NULL;
+ void *cb_ctx = NULL;
+ struct spdk_nvme_ctrlr *dummy = NULL;
+
+ /* test when probe_cb returns false */
+ MOCK_SET(dummy_probe_cb, false);
+ rc = nvme_ctrlr_probe(&trid, devhandle, dummy_probe_cb, cb_ctx);
+ CU_ASSERT(rc == 1);
+
+ /* probe_cb returns true but we can't construct a ctrl */
+ MOCK_SET(dummy_probe_cb, true);
+ MOCK_SET(nvme_transport_ctrlr_construct, NULL);
+ rc = nvme_ctrlr_probe(&trid, devhandle, dummy_probe_cb, cb_ctx);
+ CU_ASSERT(rc == -1);
+
+ /* happy path */
+ g_spdk_nvme_driver = malloc(sizeof(struct nvme_driver));
+ SPDK_CU_ASSERT_FATAL(g_spdk_nvme_driver != NULL);
+ MOCK_SET(dummy_probe_cb, true);
+ MOCK_SET(nvme_transport_ctrlr_construct, &ctrlr);
+ TAILQ_INIT(&g_nvme_init_ctrlrs);
+ rc = nvme_ctrlr_probe(&trid, devhandle, dummy_probe_cb, cb_ctx);
+ CU_ASSERT(rc == 0);
+ dummy = TAILQ_FIRST(&g_nvme_init_ctrlrs);
+ CU_ASSERT(dummy == ut_nvme_transport_ctrlr_construct);
+ TAILQ_REMOVE(&g_nvme_init_ctrlrs, dummy, tailq);
+ MOCK_CLEAR_P(nvme_transport_ctrlr_construct);
+
+ free(g_spdk_nvme_driver);
+}
+
+static void
+test_nvme_robust_mutex_init_shared(void)
+{
+ pthread_mutex_t mtx;
+ int rc = 0;
+
+ /* test where both pthread calls succeed */
+ MOCK_SET(pthread_mutexattr_init, 0);
+ MOCK_SET(pthread_mutex_init, 0);
+ rc = nvme_robust_mutex_init_shared(&mtx);
+ CU_ASSERT(rc == 0);
+
+ /* test where we can't init attr's but init mutex works */
+ MOCK_SET(pthread_mutexattr_init, -1);
+ MOCK_SET(pthread_mutex_init, 0);
+ rc = nvme_robust_mutex_init_shared(&mtx);
+ /* for FreeBSD the only possible return value is 0 */
+#ifndef __FreeBSD__
+ CU_ASSERT(rc != 0);
+#else
+ CU_ASSERT(rc == 0);
+#endif
+
+ /* test where we can init attr's but the mutex init fails */
+ MOCK_SET(pthread_mutexattr_init, 0);
+ MOCK_SET(pthread_mutex_init, -1);
+ rc = nvme_robust_mutex_init_shared(&mtx);
+ /* for FreeBSD the only possible return value is 0 */
+#ifndef __FreeBSD__
+ CU_ASSERT(rc != 0);
+#else
+ CU_ASSERT(rc == 0);
+#endif
+}
+
+static void
+test_opc_data_transfer(void)
+{
+ enum spdk_nvme_data_transfer xfer;
+
+ xfer = spdk_nvme_opc_get_data_transfer(SPDK_NVME_OPC_FLUSH);
+ CU_ASSERT(xfer == SPDK_NVME_DATA_NONE);
+
+ xfer = spdk_nvme_opc_get_data_transfer(SPDK_NVME_OPC_WRITE);
+ CU_ASSERT(xfer == SPDK_NVME_DATA_HOST_TO_CONTROLLER);
+
+ xfer = spdk_nvme_opc_get_data_transfer(SPDK_NVME_OPC_READ);
+ CU_ASSERT(xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST);
+
+ xfer = spdk_nvme_opc_get_data_transfer(SPDK_NVME_OPC_GET_LOG_PAGE);
+ CU_ASSERT(xfer == SPDK_NVME_DATA_CONTROLLER_TO_HOST);
+}
+
+static void
+test_trid_parse_and_compare(void)
+{
+ struct spdk_nvme_transport_id trid1, trid2;
+ int ret;
+
+ /* set trid1 trid2 value to id parse */
+ ret = spdk_nvme_transport_id_parse(NULL, "trtype:PCIe traddr:0000:04:00.0");
+ CU_ASSERT(ret == -EINVAL);
+ memset(&trid1, 0, sizeof(trid1));
+ ret = spdk_nvme_transport_id_parse(&trid1, NULL);
+ CU_ASSERT(ret == -EINVAL);
+ ret = spdk_nvme_transport_id_parse(NULL, NULL);
+ CU_ASSERT(ret == -EINVAL);
+ memset(&trid1, 0, sizeof(trid1));
+ ret = spdk_nvme_transport_id_parse(&trid1, "trtype-PCIe traddr-0000-04-00.0");
+ CU_ASSERT(ret == -EINVAL);
+ memset(&trid1, 0, sizeof(trid1));
+ ret = spdk_nvme_transport_id_parse(&trid1, "trtype-PCIe traddr-0000-04-00.0-:");
+ CU_ASSERT(ret == -EINVAL);
+ memset(&trid1, 0, sizeof(trid1));
+ ret = spdk_nvme_transport_id_parse(&trid1, " \t\n:");
+ CU_ASSERT(ret == -EINVAL);
+ memset(&trid1, 0, sizeof(trid1));
+ CU_ASSERT(spdk_nvme_transport_id_parse(&trid1,
+ "trtype:rdma\n"
+ "adrfam:ipv4\n"
+ "traddr:192.168.100.8\n"
+ "trsvcid:4420\n"
+ "subnqn:nqn.2014-08.org.nvmexpress.discovery") == 0);
+ CU_ASSERT(trid1.trtype == SPDK_NVME_TRANSPORT_RDMA);
+ CU_ASSERT(trid1.adrfam == SPDK_NVMF_ADRFAM_IPV4);
+ CU_ASSERT(strcmp(trid1.traddr, "192.168.100.8") == 0);
+ CU_ASSERT(strcmp(trid1.trsvcid, "4420") == 0);
+ CU_ASSERT(strcmp(trid1.subnqn, "nqn.2014-08.org.nvmexpress.discovery") == 0);
+
+ memset(&trid2, 0, sizeof(trid2));
+ CU_ASSERT(spdk_nvme_transport_id_parse(&trid2, "trtype:PCIe traddr:0000:04:00.0") == 0);
+ CU_ASSERT(trid2.trtype == SPDK_NVME_TRANSPORT_PCIE);
+ CU_ASSERT(strcmp(trid2.traddr, "0000:04:00.0") == 0);
+
+ CU_ASSERT(spdk_nvme_transport_id_compare(&trid1, &trid2) != 0);
+
+ /* set trid1 trid2 and test id_compare */
+ memset_trid(&trid1, &trid2);
+ trid1.adrfam = SPDK_NVMF_ADRFAM_IPV6;
+ trid2.adrfam = SPDK_NVMF_ADRFAM_IPV4;
+ ret = spdk_nvme_transport_id_compare(&trid1, &trid2);
+ CU_ASSERT(ret > 0);
+
+ memset_trid(&trid1, &trid2);
+ snprintf(trid1.traddr, sizeof(trid1.traddr), "192.168.100.8");
+ snprintf(trid2.traddr, sizeof(trid2.traddr), "192.168.100.9");
+ ret = spdk_nvme_transport_id_compare(&trid1, &trid2);
+ CU_ASSERT(ret < 0);
+
+ memset_trid(&trid1, &trid2);
+ snprintf(trid1.trsvcid, sizeof(trid1.trsvcid), "4420");
+ snprintf(trid2.trsvcid, sizeof(trid2.trsvcid), "4421");
+ ret = spdk_nvme_transport_id_compare(&trid1, &trid2);
+ CU_ASSERT(ret < 0);
+
+ memset_trid(&trid1, &trid2);
+ snprintf(trid1.subnqn, sizeof(trid1.subnqn), "subnqn:nqn.2016-08.org.nvmexpress.discovery");
+ snprintf(trid2.subnqn, sizeof(trid2.subnqn), "subnqn:nqn.2017-08.org.nvmexpress.discovery");
+ ret = spdk_nvme_transport_id_compare(&trid1, &trid2);
+ CU_ASSERT(ret < 0);
+
+ memset_trid(&trid1, &trid2);
+ snprintf(trid1.subnqn, sizeof(trid1.subnqn), "subnqn:nqn.2016-08.org.nvmexpress.discovery");
+ snprintf(trid2.subnqn, sizeof(trid2.subnqn), "subnqn:nqn.2016-08.org.nvmexpress.discovery");
+ ret = spdk_nvme_transport_id_compare(&trid1, &trid2);
+ CU_ASSERT(ret == 0);
+
+ memset_trid(&trid1, &trid2);
+ snprintf(trid1.subnqn, sizeof(trid1.subnqn), "subnqn:nqn.2016-08.org.nvmexpress.discovery");
+ snprintf(trid2.subnqn, sizeof(trid2.subnqn), "subnqn:nqn.2016-08.org.Nvmexpress.discovery");
+ ret = spdk_nvme_transport_id_compare(&trid1, &trid2);
+ CU_ASSERT(ret > 0);
+
+ memset_trid(&trid1, &trid2);
+ ret = spdk_nvme_transport_id_compare(&trid1, &trid2);
+ CU_ASSERT(ret == 0);
+
+ /* Compare PCI addresses via spdk_pci_addr_compare (rather than as strings) */
+ memset_trid(&trid1, &trid2);
+ CU_ASSERT(spdk_nvme_transport_id_parse(&trid1, "trtype:PCIe traddr:0000:04:00.0") == 0);
+ CU_ASSERT(spdk_nvme_transport_id_parse(&trid2, "trtype:PCIe traddr:04:00.0") == 0);
+ CU_ASSERT(spdk_nvme_transport_id_compare(&trid1, &trid2) == 0);
+
+ memset_trid(&trid1, &trid2);
+ CU_ASSERT(spdk_nvme_transport_id_parse(&trid1, "trtype:PCIe traddr:0000:05:00.0") == 0);
+ CU_ASSERT(spdk_nvme_transport_id_parse(&trid2, "trtype:PCIe traddr:04:00.0") == 0);
+ CU_ASSERT(spdk_nvme_transport_id_compare(&trid1, &trid2) > 0);
+
+ memset_trid(&trid1, &trid2);
+ CU_ASSERT(spdk_nvme_transport_id_parse(&trid1, "trtype:PCIe traddr:0000:04:00.0") == 0);
+ CU_ASSERT(spdk_nvme_transport_id_parse(&trid2, "trtype:PCIe traddr:05:00.0") == 0);
+ CU_ASSERT(spdk_nvme_transport_id_compare(&trid1, &trid2) < 0);
+
+ memset_trid(&trid1, &trid2);
+ CU_ASSERT(spdk_nvme_transport_id_parse(&trid1, "trtype=PCIe traddr=0000:04:00.0") == 0);
+ CU_ASSERT(spdk_nvme_transport_id_parse(&trid2, "trtype=PCIe traddr=05:00.0") == 0);
+ CU_ASSERT(spdk_nvme_transport_id_compare(&trid1, &trid2) < 0);
+}
+
+static void
+test_spdk_nvme_transport_id_parse_trtype(void)
+{
+
+ enum spdk_nvme_transport_type *trtype;
+ enum spdk_nvme_transport_type sct;
+ char *str;
+
+ trtype = NULL;
+ str = "unit_test";
+
+ /* test function returned value when trtype is NULL but str not NULL */
+ CU_ASSERT(spdk_nvme_transport_id_parse_trtype(trtype, str) == (-EINVAL));
+
+ /* test function returned value when str is NULL but trtype not NULL */
+ trtype = &sct;
+ str = NULL;
+ CU_ASSERT(spdk_nvme_transport_id_parse_trtype(trtype, str) == (-EINVAL));
+
+ /* test function returned value when str and strtype not NULL, but str value
+ * not "PCIe" or "RDMA" */
+ str = "unit_test";
+ CU_ASSERT(spdk_nvme_transport_id_parse_trtype(trtype, str) == (-ENOENT));
+
+ /* test trtype value when use function "strcasecmp" to compare str and "PCIe",not case-sensitive */
+ str = "PCIe";
+ spdk_nvme_transport_id_parse_trtype(trtype, str);
+ CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_PCIE);
+
+ str = "pciE";
+ spdk_nvme_transport_id_parse_trtype(trtype, str);
+ CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_PCIE);
+
+ /* test trtype value when use function "strcasecmp" to compare str and "RDMA",not case-sensitive */
+ str = "RDMA";
+ spdk_nvme_transport_id_parse_trtype(trtype, str);
+ CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_RDMA);
+
+ str = "rdma";
+ spdk_nvme_transport_id_parse_trtype(trtype, str);
+ CU_ASSERT((*trtype) == SPDK_NVME_TRANSPORT_RDMA);
+
+}
+
+static void
+test_spdk_nvme_transport_id_parse_adrfam(void)
+{
+
+ enum spdk_nvmf_adrfam *adrfam;
+ enum spdk_nvmf_adrfam sct;
+ char *str;
+
+ adrfam = NULL;
+ str = "unit_test";
+
+ /* test function returned value when adrfam is NULL but str not NULL */
+ CU_ASSERT(spdk_nvme_transport_id_parse_adrfam(adrfam, str) == (-EINVAL));
+
+ /* test function returned value when str is NULL but adrfam not NULL */
+ adrfam = &sct;
+ str = NULL;
+ CU_ASSERT(spdk_nvme_transport_id_parse_adrfam(adrfam, str) == (-EINVAL));
+
+ /* test function returned value when str and adrfam not NULL, but str value
+ * not "IPv4" or "IPv6" or "IB" or "FC" */
+ str = "unit_test";
+ CU_ASSERT(spdk_nvme_transport_id_parse_adrfam(adrfam, str) == (-ENOENT));
+
+ /* test adrfam value when use function "strcasecmp" to compare str and "IPv4",not case-sensitive */
+ str = "IPv4";
+ spdk_nvme_transport_id_parse_adrfam(adrfam, str);
+ CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_IPV4);
+
+ str = "ipV4";
+ spdk_nvme_transport_id_parse_adrfam(adrfam, str);
+ CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_IPV4);
+
+ /* test adrfam value when use function "strcasecmp" to compare str and "IPv6",not case-sensitive */
+ str = "IPv6";
+ spdk_nvme_transport_id_parse_adrfam(adrfam, str);
+ CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_IPV6);
+
+ str = "ipV6";
+ spdk_nvme_transport_id_parse_adrfam(adrfam, str);
+ CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_IPV6);
+
+ /* test adrfam value when use function "strcasecmp" to compare str and "IB",not case-sensitive */
+ str = "IB";
+ spdk_nvme_transport_id_parse_adrfam(adrfam, str);
+ CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_IB);
+
+ str = "ib";
+ spdk_nvme_transport_id_parse_adrfam(adrfam, str);
+ CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_IB);
+
+ /* test adrfam value when use function "strcasecmp" to compare str and "FC",not case-sensitive */
+ str = "FC";
+ spdk_nvme_transport_id_parse_adrfam(adrfam, str);
+ CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_FC);
+
+ str = "fc";
+ spdk_nvme_transport_id_parse_adrfam(adrfam, str);
+ CU_ASSERT((*adrfam) == SPDK_NVMF_ADRFAM_FC);
+
+}
+
+static void
+test_trid_trtype_str(void)
+{
+ const char *s;
+
+ s = spdk_nvme_transport_id_trtype_str(-5);
+ CU_ASSERT(s == NULL);
+
+ s = spdk_nvme_transport_id_trtype_str(SPDK_NVME_TRANSPORT_PCIE);
+ SPDK_CU_ASSERT_FATAL(s != NULL);
+ CU_ASSERT(strcmp(s, "PCIe") == 0);
+
+ s = spdk_nvme_transport_id_trtype_str(SPDK_NVME_TRANSPORT_RDMA);
+ SPDK_CU_ASSERT_FATAL(s != NULL);
+ CU_ASSERT(strcmp(s, "RDMA") == 0);
+}
+
+static void
+test_trid_adrfam_str(void)
+{
+ const char *s;
+
+ s = spdk_nvme_transport_id_adrfam_str(-5);
+ CU_ASSERT(s == NULL);
+
+ s = spdk_nvme_transport_id_adrfam_str(SPDK_NVMF_ADRFAM_IPV4);
+ SPDK_CU_ASSERT_FATAL(s != NULL);
+ CU_ASSERT(strcmp(s, "IPv4") == 0);
+
+ s = spdk_nvme_transport_id_adrfam_str(SPDK_NVMF_ADRFAM_IPV6);
+ SPDK_CU_ASSERT_FATAL(s != NULL);
+ CU_ASSERT(strcmp(s, "IPv6") == 0);
+
+ s = spdk_nvme_transport_id_adrfam_str(SPDK_NVMF_ADRFAM_IB);
+ SPDK_CU_ASSERT_FATAL(s != NULL);
+ CU_ASSERT(strcmp(s, "IB") == 0);
+
+ s = spdk_nvme_transport_id_adrfam_str(SPDK_NVMF_ADRFAM_FC);
+ SPDK_CU_ASSERT_FATAL(s != NULL);
+ CU_ASSERT(strcmp(s, "FC") == 0);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("nvme", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "test_opc_data_transfer",
+ test_opc_data_transfer) == NULL ||
+ CU_add_test(suite, "test_spdk_nvme_transport_id_parse_trtype",
+ test_spdk_nvme_transport_id_parse_trtype) == NULL ||
+ CU_add_test(suite, "test_spdk_nvme_transport_id_parse_adrfam",
+ test_spdk_nvme_transport_id_parse_adrfam) == NULL ||
+ CU_add_test(suite, "test_trid_parse_and_compare",
+ test_trid_parse_and_compare) == NULL ||
+ CU_add_test(suite, "test_trid_trtype_str",
+ test_trid_trtype_str) == NULL ||
+ CU_add_test(suite, "test_trid_adrfam_str",
+ test_trid_adrfam_str) == NULL ||
+ CU_add_test(suite, "test_nvme_ctrlr_probe",
+ test_nvme_ctrlr_probe) == NULL ||
+ CU_add_test(suite, "test_spdk_nvme_probe",
+ test_spdk_nvme_probe) == NULL ||
+ CU_add_test(suite, "test_spdk_nvme_connect",
+ test_spdk_nvme_connect) == NULL ||
+ CU_add_test(suite, "test_nvme_init_controllers",
+ test_nvme_init_controllers) == NULL ||
+ CU_add_test(suite, "test_nvme_driver_init",
+ test_nvme_driver_init) == NULL ||
+ CU_add_test(suite, "test_spdk_nvme_detach",
+ test_spdk_nvme_detach) == NULL ||
+ CU_add_test(suite, "test_nvme_completion_poll_cb",
+ test_nvme_completion_poll_cb) == NULL ||
+ CU_add_test(suite, "test_nvme_user_copy_cmd_complete",
+ test_nvme_user_copy_cmd_complete) == NULL ||
+ CU_add_test(suite, "test_nvme_allocate_request_null",
+ test_nvme_allocate_request_null) == NULL ||
+ CU_add_test(suite, "test_nvme_allocate_request",
+ test_nvme_allocate_request) == NULL ||
+ CU_add_test(suite, "test_nvme_free_request",
+ test_nvme_free_request) == NULL ||
+ CU_add_test(suite, "test_nvme_allocate_request_user_copy",
+ test_nvme_allocate_request_user_copy) == NULL ||
+ CU_add_test(suite, "test_nvme_robust_mutex_init_shared",
+ test_nvme_robust_mutex_init_shared) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/.gitignore
new file mode 100644
index 00000000..97a75bee
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/.gitignore
@@ -0,0 +1 @@
+nvme_ctrlr_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/Makefile
new file mode 100644
index 00000000..3ce33dc4
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_ctrlr_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/nvme_ctrlr_ut.c b/src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/nvme_ctrlr_ut.c
new file mode 100644
index 00000000..db7469ff
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ctrlr.c/nvme_ctrlr_ut.c
@@ -0,0 +1,1795 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "spdk_internal/log.h"
+
+#include "common/lib/test_env.c"
+
+struct spdk_trace_flag SPDK_LOG_NVME = {
+ .name = "nvme",
+ .enabled = false,
+};
+
+#include "nvme/nvme_ctrlr.c"
+#include "nvme/nvme_quirks.c"
+
+pid_t g_spdk_nvme_pid;
+
+struct nvme_driver _g_nvme_driver = {
+ .lock = PTHREAD_MUTEX_INITIALIZER,
+};
+
+struct nvme_driver *g_spdk_nvme_driver = &_g_nvme_driver;
+
+struct spdk_nvme_registers g_ut_nvme_regs = {};
+
+__thread int nvme_thread_ioq_index = -1;
+
+uint32_t set_size = 1;
+
+int set_status_cpl = -1;
+
+DEFINE_STUB(nvme_ctrlr_cmd_set_host_id, int,
+ (struct spdk_nvme_ctrlr *ctrlr, void *host_id, uint32_t host_id_size,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg), 0);
+DEFINE_STUB(nvme_ctrlr_identify_ns, int, (struct spdk_nvme_ns *ns), 0);
+DEFINE_STUB(nvme_ctrlr_identify_id_desc, int, (struct spdk_nvme_ns *ns), 0);
+DEFINE_STUB_V(nvme_ns_set_identify_data, (struct spdk_nvme_ns *ns));
+
+struct spdk_nvme_ctrlr *nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
+ const struct spdk_nvme_ctrlr_opts *opts,
+ void *devhandle)
+{
+ return NULL;
+}
+
+int
+nvme_transport_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
+{
+ nvme_ctrlr_destruct_finish(ctrlr);
+
+ return 0;
+}
+
+int
+nvme_transport_ctrlr_enable(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return 0;
+}
+
+int
+nvme_transport_ctrlr_set_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t value)
+{
+ SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 4);
+ *(uint32_t *)((uintptr_t)&g_ut_nvme_regs + offset) = value;
+ return 0;
+}
+
+int
+nvme_transport_ctrlr_set_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t value)
+{
+ SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 8);
+ *(uint64_t *)((uintptr_t)&g_ut_nvme_regs + offset) = value;
+ return 0;
+}
+
+int
+nvme_transport_ctrlr_get_reg_4(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint32_t *value)
+{
+ SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 4);
+ *value = *(uint32_t *)((uintptr_t)&g_ut_nvme_regs + offset);
+ return 0;
+}
+
+int
+nvme_transport_ctrlr_get_reg_8(struct spdk_nvme_ctrlr *ctrlr, uint32_t offset, uint64_t *value)
+{
+ SPDK_CU_ASSERT_FATAL(offset <= sizeof(struct spdk_nvme_registers) - 8);
+ *value = *(uint64_t *)((uintptr_t)&g_ut_nvme_regs + offset);
+ return 0;
+}
+
+uint32_t
+nvme_transport_ctrlr_get_max_xfer_size(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return UINT32_MAX;
+}
+
+uint16_t
+nvme_transport_ctrlr_get_max_sges(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return 1;
+}
+
+void *
+nvme_transport_ctrlr_alloc_cmb_io_buffer(struct spdk_nvme_ctrlr *ctrlr, size_t size)
+{
+ return NULL;
+}
+
+int
+nvme_transport_ctrlr_free_cmb_io_buffer(struct spdk_nvme_ctrlr *ctrlr, void *buf, size_t size)
+{
+ return 0;
+}
+
+struct spdk_nvme_qpair *
+nvme_transport_ctrlr_create_io_qpair(struct spdk_nvme_ctrlr *ctrlr, uint16_t qid,
+ const struct spdk_nvme_io_qpair_opts *opts)
+{
+ struct spdk_nvme_qpair *qpair;
+
+ qpair = calloc(1, sizeof(*qpair));
+ SPDK_CU_ASSERT_FATAL(qpair != NULL);
+
+ qpair->ctrlr = ctrlr;
+ qpair->id = qid;
+ qpair->qprio = opts->qprio;
+
+ return qpair;
+}
+
+int
+nvme_transport_ctrlr_delete_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
+{
+ free(qpair);
+ return 0;
+}
+
+int
+nvme_transport_ctrlr_reinit_io_qpair(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_qpair *qpair)
+{
+ return 0;
+}
+
+int
+nvme_transport_qpair_reset(struct spdk_nvme_qpair *qpair)
+{
+ return 0;
+}
+
+int
+nvme_driver_init(void)
+{
+ return 0;
+}
+
+int nvme_qpair_init(struct spdk_nvme_qpair *qpair, uint16_t id,
+ struct spdk_nvme_ctrlr *ctrlr,
+ enum spdk_nvme_qprio qprio,
+ uint32_t num_requests)
+{
+ qpair->id = id;
+ qpair->qprio = qprio;
+ qpair->ctrlr = ctrlr;
+
+ return 0;
+}
+
+static void
+fake_cpl_success(spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ struct spdk_nvme_cpl cpl = {};
+
+ cpl.status.sc = SPDK_NVME_SC_SUCCESS;
+ cb_fn(cb_arg, &cpl);
+}
+
+int
+spdk_nvme_ctrlr_cmd_set_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
+ uint32_t cdw11, uint32_t cdw12, void *payload, uint32_t payload_size,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ CU_ASSERT(0);
+ return -1;
+}
+
+int
+spdk_nvme_ctrlr_cmd_get_feature(struct spdk_nvme_ctrlr *ctrlr, uint8_t feature,
+ uint32_t cdw11, void *payload, uint32_t payload_size,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ CU_ASSERT(0);
+ return -1;
+}
+
+int
+spdk_nvme_ctrlr_cmd_get_log_page(struct spdk_nvme_ctrlr *ctrlr, uint8_t log_page,
+ uint32_t nsid, void *payload, uint32_t payload_size,
+ uint64_t offset, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ fake_cpl_success(cb_fn, cb_arg);
+ return 0;
+}
+
+int
+nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_ASYNC_EVENT_REQUEST);
+
+ /*
+ * For the purposes of this unit test, we don't need to bother emulating request submission.
+ */
+
+ return 0;
+}
+
+int32_t
+spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
+{
+ return 0;
+}
+
+void
+nvme_qpair_disable(struct spdk_nvme_qpair *qpair)
+{
+}
+
+void
+nvme_qpair_enable(struct spdk_nvme_qpair *qpair)
+{
+}
+
+void
+nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl)
+{
+ struct nvme_completion_poll_status *status = arg;
+
+ status->cpl = *cpl;
+ status->done = true;
+}
+
+int
+spdk_nvme_wait_for_completion_robust_lock(
+ struct spdk_nvme_qpair *qpair,
+ struct nvme_completion_poll_status *status,
+ pthread_mutex_t *robust_mutex)
+{
+ status->done = true;
+ memset(&status->cpl, 0, sizeof(status->cpl));
+ status->cpl.status.sc = 0;
+ if (set_status_cpl == 1) {
+ status->cpl.status.sc = 1;
+ }
+ return spdk_nvme_cpl_is_error(&status->cpl) ? -EIO : 0;
+}
+
+int
+spdk_nvme_wait_for_completion(struct spdk_nvme_qpair *qpair,
+ struct nvme_completion_poll_status *status)
+{
+ return spdk_nvme_wait_for_completion_robust_lock(qpair, status, NULL);
+}
+
+
+int
+nvme_ctrlr_cmd_set_async_event_config(struct spdk_nvme_ctrlr *ctrlr,
+ union spdk_nvme_feat_async_event_configuration config, spdk_nvme_cmd_cb cb_fn,
+ void *cb_arg)
+{
+ fake_cpl_success(cb_fn, cb_arg);
+ return 0;
+}
+
+int
+nvme_ctrlr_cmd_identify(struct spdk_nvme_ctrlr *ctrlr, uint8_t cns, uint16_t cntid, uint32_t nsid,
+ void *payload, size_t payload_size,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ if (cns == SPDK_NVME_IDENTIFY_ACTIVE_NS_LIST) {
+ uint32_t count = 0;
+ uint32_t i = 0;
+ struct spdk_nvme_ns_list *ns_list = (struct spdk_nvme_ns_list *)payload;
+
+ for (i = 1; i <= ctrlr->num_ns; i++) {
+ if (i <= nsid) {
+ continue;
+ }
+
+ ns_list->ns_list[count++] = i;
+ if (count == SPDK_COUNTOF(ns_list->ns_list)) {
+ break;
+ }
+ }
+
+ }
+ fake_cpl_success(cb_fn, cb_arg);
+ return 0;
+}
+
+int
+nvme_ctrlr_cmd_set_num_queues(struct spdk_nvme_ctrlr *ctrlr,
+ uint32_t num_queues, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ fake_cpl_success(cb_fn, cb_arg);
+ return 0;
+}
+
+int
+nvme_ctrlr_cmd_get_num_queues(struct spdk_nvme_ctrlr *ctrlr,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ fake_cpl_success(cb_fn, cb_arg);
+ return 0;
+}
+
+int
+nvme_ctrlr_cmd_attach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
+ struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ return 0;
+}
+
+int
+nvme_ctrlr_cmd_detach_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid,
+ struct spdk_nvme_ctrlr_list *payload, spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ return 0;
+}
+
+int
+nvme_ctrlr_cmd_create_ns(struct spdk_nvme_ctrlr *ctrlr, struct spdk_nvme_ns_data *payload,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ return 0;
+}
+
+int
+nvme_ctrlr_cmd_delete_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, spdk_nvme_cmd_cb cb_fn,
+ void *cb_arg)
+{
+ return 0;
+}
+
+int
+nvme_ctrlr_cmd_format(struct spdk_nvme_ctrlr *ctrlr, uint32_t nsid, struct spdk_nvme_format *format,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ return 0;
+}
+
+int
+nvme_ctrlr_cmd_fw_commit(struct spdk_nvme_ctrlr *ctrlr, const struct spdk_nvme_fw_commit *fw_commit,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ CU_ASSERT(fw_commit->ca == SPDK_NVME_FW_COMMIT_REPLACE_IMG);
+ if (fw_commit->fs == 0) {
+ return -1;
+ }
+ set_status_cpl = 1;
+ if (ctrlr->is_resetting == true) {
+ set_status_cpl = 0;
+ }
+ return 0;
+}
+
+int
+nvme_ctrlr_cmd_fw_image_download(struct spdk_nvme_ctrlr *ctrlr,
+ uint32_t size, uint32_t offset, void *payload,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ if ((size != 0 && payload == NULL) || (size == 0 && payload != NULL)) {
+ return -1;
+ }
+ CU_ASSERT(offset == 0);
+ return 0;
+}
+
+void
+nvme_ns_destruct(struct spdk_nvme_ns *ns)
+{
+}
+
+int
+nvme_ns_construct(struct spdk_nvme_ns *ns, uint32_t id,
+ struct spdk_nvme_ctrlr *ctrlr)
+{
+ return 0;
+}
+
+#define DECLARE_AND_CONSTRUCT_CTRLR() \
+ struct spdk_nvme_ctrlr ctrlr = {}; \
+ struct spdk_nvme_qpair adminq = {}; \
+ struct nvme_request req; \
+ \
+ STAILQ_INIT(&adminq.free_req); \
+ STAILQ_INSERT_HEAD(&adminq.free_req, &req, stailq); \
+ ctrlr.adminq = &adminq;
+
+static void
+test_nvme_ctrlr_init_en_1_rdy_0(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));
+
+ /*
+ * Initial state: CC.EN = 1, CSTS.RDY = 0
+ */
+ g_ut_nvme_regs.cc.bits.en = 1;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_1);
+
+ /*
+ * Transition to CSTS.RDY = 1.
+ * init() should set CC.EN = 0.
+ */
+ g_ut_nvme_regs.csts.bits.rdy = 1;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);
+
+ /*
+ * Transition to CSTS.RDY = 0.
+ */
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+
+ /*
+ * Transition to CC.EN = 1
+ */
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+
+ /*
+ * Transition to CSTS.RDY = 1.
+ */
+ g_ut_nvme_regs.csts.bits.rdy = 1;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_ADMIN_QUEUE);
+
+ /*
+ * Transition to READY.
+ */
+ while (ctrlr.state != NVME_CTRLR_STATE_READY) {
+ nvme_ctrlr_process_init(&ctrlr);
+ }
+
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+}
+
+static void
+test_nvme_ctrlr_init_en_1_rdy_1(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));
+
+ /*
+ * Initial state: CC.EN = 1, CSTS.RDY = 1
+ * init() should set CC.EN = 0.
+ */
+ g_ut_nvme_regs.cc.bits.en = 1;
+ g_ut_nvme_regs.csts.bits.rdy = 1;
+
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);
+
+ /*
+ * Transition to CSTS.RDY = 0.
+ */
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+
+ /*
+ * Transition to CC.EN = 1
+ */
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+
+ /*
+ * Transition to CSTS.RDY = 1.
+ */
+ g_ut_nvme_regs.csts.bits.rdy = 1;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_ADMIN_QUEUE);
+
+ /*
+ * Transition to READY.
+ */
+ while (ctrlr.state != NVME_CTRLR_STATE_READY) {
+ nvme_ctrlr_process_init(&ctrlr);
+ }
+
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+}
+
+static void
+test_nvme_ctrlr_init_en_0_rdy_0_ams_rr(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));
+
+ /*
+ * Initial state: CC.EN = 0, CSTS.RDY = 0
+ * init() should set CC.EN = 1.
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Default round robin enabled
+ */
+ g_ut_nvme_regs.cap.bits.ams = 0x0;
+ ctrlr.cap = g_ut_nvme_regs.cap;
+
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ /*
+ * Case 1: default round robin arbitration mechanism selected
+ */
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
+ CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);
+
+ /*
+ * Complete and destroy the controller
+ */
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /*
+ * Reset to initial state
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Case 2: weighted round robin arbitration mechanism selected
+ */
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);
+
+ /*
+ * Complete and destroy the controller
+ */
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /*
+ * Reset to initial state
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Case 3: vendor specific arbitration mechanism selected
+ */
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);
+
+ /*
+ * Complete and destroy the controller
+ */
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /*
+ * Reset to initial state
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Case 4: invalid arbitration mechanism selected
+ */
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);
+
+ /*
+ * Complete and destroy the controller
+ */
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /*
+ * Reset to initial state
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Case 5: reset to default round robin arbitration mechanism
+ */
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
+ CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);
+
+ /*
+ * Transition to CSTS.RDY = 1.
+ */
+ g_ut_nvme_regs.csts.bits.rdy = 1;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_ADMIN_QUEUE);
+
+ /*
+ * Transition to READY.
+ */
+ while (ctrlr.state != NVME_CTRLR_STATE_READY) {
+ nvme_ctrlr_process_init(&ctrlr);
+ }
+
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+}
+
+static void
+test_nvme_ctrlr_init_en_0_rdy_0_ams_wrr(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));
+
+ /*
+ * Initial state: CC.EN = 0, CSTS.RDY = 0
+ * init() should set CC.EN = 1.
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Weighted round robin enabled
+ */
+ g_ut_nvme_regs.cap.bits.ams = SPDK_NVME_CAP_AMS_WRR;
+ ctrlr.cap = g_ut_nvme_regs.cap;
+
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ /*
+ * Case 1: default round robin arbitration mechanism selected
+ */
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
+ CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);
+
+ /*
+ * Complete and destroy the controller
+ */
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /*
+ * Reset to initial state
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Case 2: weighted round robin arbitration mechanism selected
+ */
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_WRR);
+ CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_WRR);
+
+ /*
+ * Complete and destroy the controller
+ */
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /*
+ * Reset to initial state
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Case 3: vendor specific arbitration mechanism selected
+ */
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);
+
+ /*
+ * Complete and destroy the controller
+ */
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /*
+ * Reset to initial state
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Case 4: invalid arbitration mechanism selected
+ */
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);
+
+ /*
+ * Complete and destroy the controller
+ */
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /*
+ * Reset to initial state
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Case 5: reset to weighted round robin arbitration mechanism
+ */
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_WRR);
+ CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_WRR);
+
+ /*
+ * Transition to CSTS.RDY = 1.
+ */
+ g_ut_nvme_regs.csts.bits.rdy = 1;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_ADMIN_QUEUE);
+
+ /*
+ * Transition to READY.
+ */
+ while (ctrlr.state != NVME_CTRLR_STATE_READY) {
+ nvme_ctrlr_process_init(&ctrlr);
+ }
+
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+}
+static void
+test_nvme_ctrlr_init_en_0_rdy_0_ams_vs(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));
+
+ /*
+ * Initial state: CC.EN = 0, CSTS.RDY = 0
+ * init() should set CC.EN = 1.
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Default round robin enabled
+ */
+ g_ut_nvme_regs.cap.bits.ams = SPDK_NVME_CAP_AMS_VS;
+ ctrlr.cap = g_ut_nvme_regs.cap;
+
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ /*
+ * Case 1: default round robin arbitration mechanism selected
+ */
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_RR;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_RR);
+ CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_RR);
+
+ /*
+ * Complete and destroy the controller
+ */
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /*
+ * Reset to initial state
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Case 2: weighted round robin arbitration mechanism selected
+ */
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_WRR;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);
+
+ /*
+ * Complete and destroy the controller
+ */
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /*
+ * Reset to initial state
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Case 3: vendor specific arbitration mechanism selected
+ */
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_VS);
+ CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_VS);
+
+ /*
+ * Complete and destroy the controller
+ */
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /*
+ * Reset to initial state
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Case 4: invalid arbitration mechanism selected
+ */
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS + 1;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) != 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 0);
+
+ /*
+ * Complete and destroy the controller
+ */
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+
+ /*
+ * Reset to initial state
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ /*
+ * Case 5: reset to vendor specific arbitration mechanism
+ */
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ ctrlr.opts.arb_mechanism = SPDK_NVME_CC_AMS_VS;
+
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.ams == SPDK_NVME_CC_AMS_VS);
+ CU_ASSERT(ctrlr.opts.arb_mechanism == SPDK_NVME_CC_AMS_VS);
+
+ /*
+ * Transition to CSTS.RDY = 1.
+ */
+ g_ut_nvme_regs.csts.bits.rdy = 1;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_ADMIN_QUEUE);
+
+ /*
+ * Transition to READY.
+ */
+ while (ctrlr.state != NVME_CTRLR_STATE_READY) {
+ nvme_ctrlr_process_init(&ctrlr);
+ }
+
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+}
+
+static void
+test_nvme_ctrlr_init_en_0_rdy_0(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));
+
+ /*
+ * Initial state: CC.EN = 0, CSTS.RDY = 0
+ * init() should set CC.EN = 1.
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+
+ /*
+ * Transition to CSTS.RDY = 1.
+ */
+ g_ut_nvme_regs.csts.bits.rdy = 1;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_ADMIN_QUEUE);
+
+ /*
+ * Transition to READY.
+ */
+ while (ctrlr.state != NVME_CTRLR_STATE_READY) {
+ nvme_ctrlr_process_init(&ctrlr);
+ }
+
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+}
+
+static void
+test_nvme_ctrlr_init_en_0_rdy_1(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ memset(&g_ut_nvme_regs, 0, sizeof(g_ut_nvme_regs));
+
+ /*
+ * Initial state: CC.EN = 0, CSTS.RDY = 1
+ */
+ g_ut_nvme_regs.cc.bits.en = 0;
+ g_ut_nvme_regs.csts.bits.rdy = 1;
+
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(&ctrlr) == 0);
+ ctrlr.cdata.nn = 1;
+ ctrlr.page_size = 0x1000;
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_INIT);
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_DISABLE_WAIT_FOR_READY_0);
+
+ /*
+ * Transition to CSTS.RDY = 0.
+ */
+ g_ut_nvme_regs.csts.bits.rdy = 0;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE);
+
+ /*
+ * Transition to CC.EN = 1
+ */
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_WAIT_FOR_READY_1);
+ CU_ASSERT(g_ut_nvme_regs.cc.bits.en == 1);
+
+ /*
+ * Transition to CSTS.RDY = 1.
+ */
+ g_ut_nvme_regs.csts.bits.rdy = 1;
+ CU_ASSERT(nvme_ctrlr_process_init(&ctrlr) == 0);
+ CU_ASSERT(ctrlr.state == NVME_CTRLR_STATE_ENABLE_ADMIN_QUEUE);
+
+ /*
+ * Transition to READY.
+ */
+ while (ctrlr.state != NVME_CTRLR_STATE_READY) {
+ nvme_ctrlr_process_init(&ctrlr);
+ }
+
+ g_ut_nvme_regs.csts.bits.shst = SPDK_NVME_SHST_COMPLETE;
+ nvme_ctrlr_destruct(&ctrlr);
+}
+
+static void
+setup_qpairs(struct spdk_nvme_ctrlr *ctrlr, uint32_t num_io_queues)
+{
+ uint32_t i;
+
+ CU_ASSERT(pthread_mutex_init(&ctrlr->ctrlr_lock, NULL) == 0);
+
+ SPDK_CU_ASSERT_FATAL(nvme_ctrlr_construct(ctrlr) == 0);
+
+ ctrlr->page_size = 0x1000;
+ ctrlr->opts.num_io_queues = num_io_queues;
+ ctrlr->free_io_qids = spdk_bit_array_create(num_io_queues + 1);
+ SPDK_CU_ASSERT_FATAL(ctrlr->free_io_qids != NULL);
+
+ spdk_bit_array_clear(ctrlr->free_io_qids, 0);
+ for (i = 1; i <= num_io_queues; i++) {
+ spdk_bit_array_set(ctrlr->free_io_qids, i);
+ }
+}
+
+static void
+cleanup_qpairs(struct spdk_nvme_ctrlr *ctrlr)
+{
+ nvme_ctrlr_destruct(ctrlr);
+}
+
+static void
+test_alloc_io_qpair_rr_1(void)
+{
+ struct spdk_nvme_io_qpair_opts opts;
+ struct spdk_nvme_ctrlr ctrlr = {};
+ struct spdk_nvme_qpair *q0;
+
+ setup_qpairs(&ctrlr, 1);
+
+ /*
+ * Fake to simulate the controller with default round robin
+ * arbitration mechanism.
+ */
+ g_ut_nvme_regs.cc.bits.ams = SPDK_NVME_CC_AMS_RR;
+
+ spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));
+
+ q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, NULL, 0);
+ SPDK_CU_ASSERT_FATAL(q0 != NULL);
+ SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
+ /* Only 1 I/O qpair was allocated, so this should fail */
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, NULL, 0) == NULL);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
+
+ /*
+ * Now that the qpair has been returned to the free list,
+ * we should be able to allocate it again.
+ */
+ q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, NULL, 0);
+ SPDK_CU_ASSERT_FATAL(q0 != NULL);
+ SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
+
+ /* Only 0 qprio is acceptable for default round robin arbitration mechanism */
+ opts.qprio = 1;
+ q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q0 == NULL);
+
+ opts.qprio = 2;
+ q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q0 == NULL);
+
+ opts.qprio = 3;
+ q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q0 == NULL);
+
+ /* Only 0 ~ 3 qprio is acceptable */
+ opts.qprio = 4;
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts)) == NULL);
+
+ cleanup_qpairs(&ctrlr);
+}
+
+static void
+test_alloc_io_qpair_wrr_1(void)
+{
+ struct spdk_nvme_io_qpair_opts opts;
+ struct spdk_nvme_ctrlr ctrlr = {};
+ struct spdk_nvme_qpair *q0, *q1;
+
+ setup_qpairs(&ctrlr, 2);
+
+ /*
+ * Fake to simulate the controller with weighted round robin
+ * arbitration mechanism.
+ */
+ g_ut_nvme_regs.cc.bits.ams = SPDK_NVME_CC_AMS_WRR;
+
+ spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));
+
+ /*
+ * Allocate 2 qpairs and free them
+ */
+ opts.qprio = 0;
+ q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q0 != NULL);
+ SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
+
+ opts.qprio = 1;
+ q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q1 != NULL);
+ SPDK_CU_ASSERT_FATAL(q1->qprio == 1);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
+
+ /*
+ * Allocate 2 qpairs and free them in the reverse order
+ */
+ opts.qprio = 2;
+ q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q0 != NULL);
+ SPDK_CU_ASSERT_FATAL(q0->qprio == 2);
+
+ opts.qprio = 3;
+ q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q1 != NULL);
+ SPDK_CU_ASSERT_FATAL(q1->qprio == 3);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
+
+ /* Only 0 ~ 3 qprio is acceptable */
+ opts.qprio = 4;
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts)) == NULL);
+
+ cleanup_qpairs(&ctrlr);
+}
+
+static void
+test_alloc_io_qpair_wrr_2(void)
+{
+ struct spdk_nvme_io_qpair_opts opts;
+ struct spdk_nvme_ctrlr ctrlr = {};
+ struct spdk_nvme_qpair *q0, *q1, *q2, *q3;
+
+ setup_qpairs(&ctrlr, 4);
+
+ /*
+ * Fake to simulate the controller with weighted round robin
+ * arbitration mechanism.
+ */
+ g_ut_nvme_regs.cc.bits.ams = SPDK_NVME_CC_AMS_WRR;
+
+ spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));
+
+ opts.qprio = 0;
+ q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q0 != NULL);
+ SPDK_CU_ASSERT_FATAL(q0->qprio == 0);
+
+ opts.qprio = 1;
+ q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q1 != NULL);
+ SPDK_CU_ASSERT_FATAL(q1->qprio == 1);
+
+ opts.qprio = 2;
+ q2 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q2 != NULL);
+ SPDK_CU_ASSERT_FATAL(q2->qprio == 2);
+
+ opts.qprio = 3;
+ q3 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q3 != NULL);
+ SPDK_CU_ASSERT_FATAL(q3->qprio == 3);
+
+ /* Only 4 I/O qpairs was allocated, so this should fail */
+ opts.qprio = 0;
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts)) == NULL);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q3) == 0);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q2) == 0);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
+
+ /*
+ * Now that the qpair has been returned to the free list,
+ * we should be able to allocate it again.
+ *
+ * Allocate 4 I/O qpairs and half of them with same qprio.
+ */
+ opts.qprio = 1;
+ q0 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q0 != NULL);
+ SPDK_CU_ASSERT_FATAL(q0->qprio == 1);
+
+ opts.qprio = 1;
+ q1 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q1 != NULL);
+ SPDK_CU_ASSERT_FATAL(q1->qprio == 1);
+
+ opts.qprio = 3;
+ q2 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q2 != NULL);
+ SPDK_CU_ASSERT_FATAL(q2->qprio == 3);
+
+ opts.qprio = 3;
+ q3 = spdk_nvme_ctrlr_alloc_io_qpair(&ctrlr, &opts, sizeof(opts));
+ SPDK_CU_ASSERT_FATAL(q3 != NULL);
+ SPDK_CU_ASSERT_FATAL(q3->qprio == 3);
+
+ /*
+ * Free all I/O qpairs in reverse order
+ */
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q0) == 0);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q1) == 0);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q2) == 0);
+ SPDK_CU_ASSERT_FATAL(spdk_nvme_ctrlr_free_io_qpair(q3) == 0);
+
+ cleanup_qpairs(&ctrlr);
+}
+
+static void
+test_nvme_ctrlr_fail(void)
+{
+ struct spdk_nvme_ctrlr ctrlr = {};
+
+ ctrlr.opts.num_io_queues = 0;
+ nvme_ctrlr_fail(&ctrlr, false);
+
+ CU_ASSERT(ctrlr.is_failed == true);
+}
+
+static void
+test_nvme_ctrlr_construct_intel_support_log_page_list(void)
+{
+ bool res;
+ struct spdk_nvme_ctrlr ctrlr = {};
+ struct spdk_nvme_intel_log_page_directory payload = {};
+ struct spdk_pci_id pci_id = {};
+
+ /* Get quirks for a device with all 0 vendor/device id */
+ ctrlr.quirks = nvme_get_quirks(&pci_id);
+ CU_ASSERT(ctrlr.quirks == 0);
+
+ nvme_ctrlr_construct_intel_support_log_page_list(&ctrlr, &payload);
+ res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE);
+ CU_ASSERT(res == false);
+
+ /* Set the vendor to Intel, but provide no device id */
+ ctrlr.cdata.vid = pci_id.vendor_id = SPDK_PCI_VID_INTEL;
+ payload.temperature_statistics_log_len = 1;
+ ctrlr.quirks = nvme_get_quirks(&pci_id);
+ memset(ctrlr.log_page_supported, 0, sizeof(ctrlr.log_page_supported));
+
+ nvme_ctrlr_construct_intel_support_log_page_list(&ctrlr, &payload);
+ res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY);
+ CU_ASSERT(res == true);
+ res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE);
+ CU_ASSERT(res == true);
+ res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY);
+ CU_ASSERT(res == false);
+ res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_SMART);
+ CU_ASSERT(res == false);
+
+ /* set valid vendor id, device id and sub device id */
+ ctrlr.cdata.vid = SPDK_PCI_VID_INTEL;
+ payload.temperature_statistics_log_len = 0;
+ pci_id.vendor_id = SPDK_PCI_VID_INTEL;
+ pci_id.device_id = 0x0953;
+ pci_id.subvendor_id = SPDK_PCI_VID_INTEL;
+ pci_id.subdevice_id = 0x3702;
+ ctrlr.quirks = nvme_get_quirks(&pci_id);
+ memset(ctrlr.log_page_supported, 0, sizeof(ctrlr.log_page_supported));
+
+ nvme_ctrlr_construct_intel_support_log_page_list(&ctrlr, &payload);
+ res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY);
+ CU_ASSERT(res == true);
+ res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE);
+ CU_ASSERT(res == false);
+ res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY);
+ CU_ASSERT(res == true);
+ res = spdk_nvme_ctrlr_is_log_page_supported(&ctrlr, SPDK_NVME_INTEL_LOG_SMART);
+ CU_ASSERT(res == false);
+}
+
+static void
+test_nvme_ctrlr_set_supported_features(void)
+{
+ bool res;
+ struct spdk_nvme_ctrlr ctrlr = {};
+
+ /* set a invalid vendor id */
+ ctrlr.cdata.vid = 0xFFFF;
+ nvme_ctrlr_set_supported_features(&ctrlr);
+ res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_FEAT_ARBITRATION);
+ CU_ASSERT(res == true);
+ res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_INTEL_FEAT_MAX_LBA);
+ CU_ASSERT(res == false);
+
+ ctrlr.cdata.vid = SPDK_PCI_VID_INTEL;
+ nvme_ctrlr_set_supported_features(&ctrlr);
+ res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_FEAT_ARBITRATION);
+ CU_ASSERT(res == true);
+ res = spdk_nvme_ctrlr_is_feature_supported(&ctrlr, SPDK_NVME_INTEL_FEAT_MAX_LBA);
+ CU_ASSERT(res == true);
+}
+
+static void
+test_ctrlr_get_default_ctrlr_opts(void)
+{
+ struct spdk_nvme_ctrlr_opts opts = {};
+
+ CU_ASSERT(spdk_uuid_parse(&g_spdk_nvme_driver->default_extended_host_id,
+ "e53e9258-c93b-48b5-be1a-f025af6d232a") == 0);
+
+ memset(&opts, 0, sizeof(opts));
+
+ /* set a smaller opts_size */
+ CU_ASSERT(sizeof(opts) > 8);
+ spdk_nvme_ctrlr_get_default_ctrlr_opts(&opts, 8);
+ CU_ASSERT_EQUAL(opts.num_io_queues, DEFAULT_MAX_IO_QUEUES);
+ CU_ASSERT_TRUE(opts.use_cmb_sqs);
+ /* check below fields are not initialized by default value */
+ CU_ASSERT_EQUAL(opts.arb_mechanism, 0);
+ CU_ASSERT_EQUAL(opts.keep_alive_timeout_ms, 0);
+ CU_ASSERT_EQUAL(opts.io_queue_size, 0);
+ CU_ASSERT_EQUAL(opts.io_queue_requests, 0);
+ for (int i = 0; i < 8; i++) {
+ CU_ASSERT(opts.host_id[i] == 0);
+ }
+ for (int i = 0; i < 16; i++) {
+ CU_ASSERT(opts.extended_host_id[i] == 0);
+ }
+ CU_ASSERT(strlen(opts.hostnqn) == 0);
+ CU_ASSERT(strlen(opts.src_addr) == 0);
+ CU_ASSERT(strlen(opts.src_svcid) == 0);
+
+ /* set a consistent opts_size */
+ spdk_nvme_ctrlr_get_default_ctrlr_opts(&opts, sizeof(opts));
+ CU_ASSERT_EQUAL(opts.num_io_queues, DEFAULT_MAX_IO_QUEUES);
+ CU_ASSERT_TRUE(opts.use_cmb_sqs);
+ CU_ASSERT_EQUAL(opts.arb_mechanism, SPDK_NVME_CC_AMS_RR);
+ CU_ASSERT_EQUAL(opts.keep_alive_timeout_ms, 10 * 1000);
+ CU_ASSERT_EQUAL(opts.io_queue_size, DEFAULT_IO_QUEUE_SIZE);
+ CU_ASSERT_EQUAL(opts.io_queue_requests, DEFAULT_IO_QUEUE_REQUESTS);
+ for (int i = 0; i < 8; i++) {
+ CU_ASSERT(opts.host_id[i] == 0);
+ }
+ CU_ASSERT_STRING_EQUAL(opts.hostnqn,
+ "2014-08.org.nvmexpress:uuid:e53e9258-c93b-48b5-be1a-f025af6d232a");
+ CU_ASSERT(memcmp(opts.extended_host_id, &g_spdk_nvme_driver->default_extended_host_id,
+ sizeof(opts.extended_host_id)) == 0);
+ CU_ASSERT(strlen(opts.src_addr) == 0);
+ CU_ASSERT(strlen(opts.src_svcid) == 0);
+}
+
+static void
+test_ctrlr_get_default_io_qpair_opts(void)
+{
+ struct spdk_nvme_ctrlr ctrlr = {};
+ struct spdk_nvme_io_qpair_opts opts = {};
+
+ memset(&opts, 0, sizeof(opts));
+
+ /* set a smaller opts_size */
+ ctrlr.opts.io_queue_size = DEFAULT_IO_QUEUE_SIZE;
+ CU_ASSERT(sizeof(opts) > 8);
+ spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, 8);
+ CU_ASSERT_EQUAL(opts.qprio, SPDK_NVME_QPRIO_URGENT);
+ CU_ASSERT_EQUAL(opts.io_queue_size, DEFAULT_IO_QUEUE_SIZE);
+ /* check below field is not initialized by default value */
+ CU_ASSERT_EQUAL(opts.io_queue_requests, 0);
+
+ /* set a consistent opts_size */
+ ctrlr.opts.io_queue_size = DEFAULT_IO_QUEUE_SIZE;
+ ctrlr.opts.io_queue_requests = DEFAULT_IO_QUEUE_REQUESTS;
+ spdk_nvme_ctrlr_get_default_io_qpair_opts(&ctrlr, &opts, sizeof(opts));
+ CU_ASSERT_EQUAL(opts.qprio, SPDK_NVME_QPRIO_URGENT);
+ CU_ASSERT_EQUAL(opts.io_queue_size, DEFAULT_IO_QUEUE_SIZE);
+ CU_ASSERT_EQUAL(opts.io_queue_requests, DEFAULT_IO_QUEUE_REQUESTS);
+}
+
+#if 0 /* TODO: move to PCIe-specific unit test */
+static void
+test_nvme_ctrlr_alloc_cmb(void)
+{
+ int rc;
+ uint64_t offset;
+ struct spdk_nvme_ctrlr ctrlr = {};
+
+ ctrlr.cmb_size = 0x1000000;
+ ctrlr.cmb_current_offset = 0x100;
+ rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x200, 0x1000, &offset);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(offset == 0x1000);
+ CU_ASSERT(ctrlr.cmb_current_offset == 0x1200);
+
+ rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x800, 0x1000, &offset);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(offset == 0x2000);
+ CU_ASSERT(ctrlr.cmb_current_offset == 0x2800);
+
+ rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x800000, 0x100000, &offset);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(offset == 0x100000);
+ CU_ASSERT(ctrlr.cmb_current_offset == 0x900000);
+
+ rc = nvme_ctrlr_alloc_cmb(&ctrlr, 0x8000000, 0x1000, &offset);
+ CU_ASSERT(rc == -1);
+}
+#endif
+
+static void
+test_spdk_nvme_ctrlr_update_firmware(void)
+{
+ struct spdk_nvme_ctrlr ctrlr = {};
+ void *payload = NULL;
+ int point_payload = 1;
+ int slot = 0;
+ int ret = 0;
+ struct spdk_nvme_status status;
+ enum spdk_nvme_fw_commit_action commit_action = SPDK_NVME_FW_COMMIT_REPLACE_IMG;
+
+ /* Set invalid size check function return value */
+ set_size = 5;
+ ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
+ CU_ASSERT(ret == -1);
+
+ /* When payload is NULL but set_size < min_page_size */
+ set_size = 4;
+ ctrlr.min_page_size = 5;
+ ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
+ CU_ASSERT(ret == -1);
+
+ /* When payload not NULL but min_page_size is 0 */
+ set_size = 4;
+ ctrlr.min_page_size = 0;
+ payload = &point_payload;
+ ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
+ CU_ASSERT(ret == -1);
+
+ /* Check firmware image download when payload not NULL and min_page_size not 0 , status.cpl value is 1 */
+ set_status_cpl = 1;
+ set_size = 4;
+ ctrlr.min_page_size = 5;
+ payload = &point_payload;
+ ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
+ CU_ASSERT(ret == -ENXIO);
+
+ /* Check firmware image download and set status.cpl value is 0 */
+ set_status_cpl = 0;
+ set_size = 4;
+ ctrlr.min_page_size = 5;
+ payload = &point_payload;
+ ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
+ CU_ASSERT(ret == -1);
+
+ /* Check firmware commit */
+ ctrlr.is_resetting = false;
+ set_status_cpl = 0;
+ slot = 1;
+ set_size = 4;
+ ctrlr.min_page_size = 5;
+ payload = &point_payload;
+ ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
+ CU_ASSERT(ret == -ENXIO);
+
+ /* Set size check firmware download and firmware commit */
+ ctrlr.is_resetting = true;
+ set_status_cpl = 0;
+ slot = 1;
+ set_size = 4;
+ ctrlr.min_page_size = 5;
+ payload = &point_payload;
+ ret = spdk_nvme_ctrlr_update_firmware(&ctrlr, payload, set_size, slot, commit_action, &status);
+ CU_ASSERT(ret == 0);
+
+ set_status_cpl = 0;
+}
+
+int
+nvme_ctrlr_cmd_doorbell_buffer_config(struct spdk_nvme_ctrlr *ctrlr, uint64_t prp1, uint64_t prp2,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ fake_cpl_success(cb_fn, cb_arg);
+ return 0;
+}
+
+static void
+test_spdk_nvme_ctrlr_doorbell_buffer_config(void)
+{
+ struct spdk_nvme_ctrlr ctrlr = {};
+ int ret = -1;
+
+ ctrlr.cdata.oacs.doorbell_buffer_config = 1;
+ ctrlr.trid.trtype = SPDK_NVME_TRANSPORT_PCIE;
+ ctrlr.page_size = 0x1000;
+ MOCK_CLEAR(spdk_malloc)
+ MOCK_CLEAR(spdk_zmalloc)
+ MOCK_CLEAR(spdk_dma_malloc)
+ MOCK_CLEAR(spdk_dma_zmalloc)
+ ret = nvme_ctrlr_set_doorbell_buffer_config(&ctrlr);
+ CU_ASSERT(ret == 0);
+ nvme_ctrlr_free_doorbell_buffer(&ctrlr);
+}
+
+static void
+test_nvme_ctrlr_test_active_ns(void)
+{
+ uint32_t nsid, minor;
+ size_t ns_id_count;
+ struct spdk_nvme_ctrlr ctrlr = {};
+
+ ctrlr.page_size = 0x1000;
+
+ for (minor = 0; minor <= 2; minor++) {
+ ctrlr.cdata.ver.bits.mjr = 1;
+ ctrlr.cdata.ver.bits.mnr = minor;
+ ctrlr.cdata.ver.bits.ter = 0;
+ ctrlr.num_ns = 1531;
+ nvme_ctrlr_identify_active_ns(&ctrlr);
+
+ for (nsid = 1; nsid <= ctrlr.num_ns; nsid++) {
+ CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, nsid) == true);
+ }
+ ctrlr.num_ns = 1559;
+ for (; nsid <= ctrlr.num_ns; nsid++) {
+ CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, nsid) == false);
+ }
+ ctrlr.num_ns = 1531;
+ for (nsid = 0; nsid < ctrlr.num_ns; nsid++) {
+ ctrlr.active_ns_list[nsid] = 0;
+ }
+ CU_ASSERT(spdk_nvme_ctrlr_get_first_active_ns(&ctrlr) == 0);
+
+ ctrlr.active_ns_list[0] = 1;
+ CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1) == true);
+ CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 2) == false);
+ nsid = spdk_nvme_ctrlr_get_first_active_ns(&ctrlr);
+ CU_ASSERT(nsid == 1);
+
+ ctrlr.active_ns_list[1] = 3;
+ CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 1) == true);
+ CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 2) == false);
+ CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, 3) == true);
+ nsid = spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, nsid);
+ CU_ASSERT(nsid == 3);
+ nsid = spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, nsid);
+ CU_ASSERT(nsid == 0);
+
+ memset(ctrlr.active_ns_list, 0, ctrlr.num_ns);
+ for (nsid = 0; nsid < ctrlr.num_ns; nsid++) {
+ ctrlr.active_ns_list[nsid] = nsid + 1;
+ }
+
+ ns_id_count = 0;
+ for (nsid = spdk_nvme_ctrlr_get_first_active_ns(&ctrlr);
+ nsid != 0; nsid = spdk_nvme_ctrlr_get_next_active_ns(&ctrlr, nsid)) {
+ CU_ASSERT(spdk_nvme_ctrlr_is_active_ns(&ctrlr, nsid) == true);
+ ns_id_count++;
+ }
+ CU_ASSERT(ns_id_count == ctrlr.num_ns);
+
+ nvme_ctrlr_destruct(&ctrlr);
+ }
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("nvme_ctrlr", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "test nvme_ctrlr init CC.EN = 1 CSTS.RDY = 0",
+ test_nvme_ctrlr_init_en_1_rdy_0) == NULL
+ || CU_add_test(suite, "test nvme_ctrlr init CC.EN = 1 CSTS.RDY = 1",
+ test_nvme_ctrlr_init_en_1_rdy_1) == NULL
+ || CU_add_test(suite, "test nvme_ctrlr init CC.EN = 0 CSTS.RDY = 0",
+ test_nvme_ctrlr_init_en_0_rdy_0) == NULL
+ || CU_add_test(suite, "test nvme_ctrlr init CC.EN = 0 CSTS.RDY = 1",
+ test_nvme_ctrlr_init_en_0_rdy_1) == NULL
+ || CU_add_test(suite, "test nvme_ctrlr init CC.EN = 0 CSTS.RDY = 0 AMS = RR",
+ test_nvme_ctrlr_init_en_0_rdy_0_ams_rr) == NULL
+ || CU_add_test(suite, "test nvme_ctrlr init CC.EN = 0 CSTS.RDY = 0 AMS = WRR",
+ test_nvme_ctrlr_init_en_0_rdy_0_ams_wrr) == NULL
+ || CU_add_test(suite, "test nvme_ctrlr init CC.EN = 0 CSTS.RDY = 0 AMS = VS",
+ test_nvme_ctrlr_init_en_0_rdy_0_ams_vs) == NULL
+ || CU_add_test(suite, "alloc_io_qpair_rr 1", test_alloc_io_qpair_rr_1) == NULL
+ || CU_add_test(suite, "get_default_ctrlr_opts", test_ctrlr_get_default_ctrlr_opts) == NULL
+ || CU_add_test(suite, "get_default_io_qpair_opts", test_ctrlr_get_default_io_qpair_opts) == NULL
+ || CU_add_test(suite, "alloc_io_qpair_wrr 1", test_alloc_io_qpair_wrr_1) == NULL
+ || CU_add_test(suite, "alloc_io_qpair_wrr 2", test_alloc_io_qpair_wrr_2) == NULL
+ || CU_add_test(suite, "test nvme ctrlr function update_firmware",
+ test_spdk_nvme_ctrlr_update_firmware) == NULL
+ || CU_add_test(suite, "test nvme_ctrlr function nvme_ctrlr_fail", test_nvme_ctrlr_fail) == NULL
+ || CU_add_test(suite, "test nvme ctrlr function nvme_ctrlr_construct_intel_support_log_page_list",
+ test_nvme_ctrlr_construct_intel_support_log_page_list) == NULL
+ || CU_add_test(suite, "test nvme ctrlr function nvme_ctrlr_set_supported_features",
+ test_nvme_ctrlr_set_supported_features) == NULL
+ || CU_add_test(suite, "test nvme ctrlr function nvme_ctrlr_set_doorbell_buffer_config",
+ test_spdk_nvme_ctrlr_doorbell_buffer_config) == NULL
+#if 0 /* TODO: move to PCIe-specific unit test */
+ || CU_add_test(suite, "test nvme ctrlr function nvme_ctrlr_alloc_cmb",
+ test_nvme_ctrlr_alloc_cmb) == NULL
+#endif
+ || CU_add_test(suite, "test nvme ctrlr function test_nvme_ctrlr_test_active_ns",
+ test_nvme_ctrlr_test_active_ns) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/.gitignore
new file mode 100644
index 00000000..1568b476
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/.gitignore
@@ -0,0 +1 @@
+nvme_ctrlr_cmd_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/Makefile
new file mode 100644
index 00000000..5c647dd3
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_ctrlr_cmd_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/nvme_ctrlr_cmd_ut.c b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/nvme_ctrlr_cmd_ut.c
new file mode 100644
index 00000000..8cbc4476
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_cmd.c/nvme_ctrlr_cmd_ut.c
@@ -0,0 +1,645 @@
+
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "nvme/nvme_ctrlr_cmd.c"
+
+#define CTRLR_CDATA_ELPE 5
+
+pid_t g_spdk_nvme_pid;
+
+struct nvme_request g_req;
+
+uint32_t error_num_entries;
+uint32_t health_log_nsid = 1;
+uint8_t feature = 1;
+uint32_t feature_cdw11 = 1;
+uint32_t feature_cdw12 = 1;
+uint8_t get_feature = 1;
+uint32_t get_feature_cdw11 = 1;
+uint32_t fw_img_size = 1024;
+uint32_t fw_img_offset = 0;
+uint16_t abort_cid = 1;
+uint16_t abort_sqid = 1;
+uint32_t namespace_management_nsid = 1;
+uint32_t format_nvme_nsid = 1;
+
+uint32_t expected_feature_ns = 2;
+uint32_t expected_feature_cdw10 = SPDK_NVME_FEAT_LBA_RANGE_TYPE;
+uint32_t expected_feature_cdw11 = 1;
+uint32_t expected_feature_cdw12 = 1;
+
+typedef void (*verify_request_fn_t)(struct nvme_request *req);
+verify_request_fn_t verify_fn;
+
+static void verify_firmware_log_page(struct nvme_request *req)
+{
+ uint32_t temp_cdw10;
+
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE);
+ CU_ASSERT(req->cmd.nsid == SPDK_NVME_GLOBAL_NS_TAG);
+
+ temp_cdw10 = ((sizeof(struct spdk_nvme_firmware_page) / sizeof(uint32_t) - 1) << 16) |
+ SPDK_NVME_LOG_FIRMWARE_SLOT;
+ CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
+}
+
+static void verify_health_log_page(struct nvme_request *req)
+{
+ uint32_t temp_cdw10;
+
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE);
+ CU_ASSERT(req->cmd.nsid == health_log_nsid);
+
+ temp_cdw10 = ((sizeof(struct spdk_nvme_health_information_page) / sizeof(uint32_t) - 1) << 16) |
+ SPDK_NVME_LOG_HEALTH_INFORMATION;
+ CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
+}
+
+static void verify_error_log_page(struct nvme_request *req)
+{
+ uint32_t temp_cdw10;
+
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE);
+ CU_ASSERT(req->cmd.nsid == SPDK_NVME_GLOBAL_NS_TAG);
+
+ temp_cdw10 = (((sizeof(struct spdk_nvme_error_information_entry) * error_num_entries) /
+ sizeof(uint32_t) - 1) << 16) | SPDK_NVME_LOG_ERROR;
+ CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
+}
+
+static void verify_set_feature_cmd(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_SET_FEATURES);
+ CU_ASSERT(req->cmd.cdw10 == feature);
+ CU_ASSERT(req->cmd.cdw11 == feature_cdw11);
+ CU_ASSERT(req->cmd.cdw12 == feature_cdw12);
+}
+
+static void verify_set_feature_ns_cmd(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_SET_FEATURES);
+ CU_ASSERT(req->cmd.cdw10 == expected_feature_cdw10);
+ CU_ASSERT(req->cmd.cdw11 == expected_feature_cdw11);
+ CU_ASSERT(req->cmd.cdw12 == expected_feature_cdw12);
+ CU_ASSERT(req->cmd.nsid == expected_feature_ns);
+}
+
+static void verify_get_feature_cmd(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_FEATURES);
+ CU_ASSERT(req->cmd.cdw10 == get_feature);
+ CU_ASSERT(req->cmd.cdw11 == get_feature_cdw11);
+}
+
+static void verify_get_feature_ns_cmd(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_FEATURES);
+ CU_ASSERT(req->cmd.cdw10 == expected_feature_cdw10);
+ CU_ASSERT(req->cmd.cdw11 == expected_feature_cdw11);
+ CU_ASSERT(req->cmd.nsid == expected_feature_ns);
+}
+
+static void verify_abort_cmd(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_ABORT);
+ CU_ASSERT(req->cmd.cdw10 == (((uint32_t)abort_cid << 16) | abort_sqid));
+}
+
+static void verify_io_raw_cmd(struct nvme_request *req)
+{
+ struct spdk_nvme_cmd command = {};
+
+ CU_ASSERT(memcmp(&req->cmd, &command, sizeof(req->cmd)) == 0);
+}
+
+static void verify_io_raw_cmd_with_md(struct nvme_request *req)
+{
+ struct spdk_nvme_cmd command = {};
+
+ CU_ASSERT(memcmp(&req->cmd, &command, sizeof(req->cmd)) == 0);
+}
+
+static void verify_intel_smart_log_page(struct nvme_request *req)
+{
+ uint32_t temp_cdw10;
+
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE);
+ CU_ASSERT(req->cmd.nsid == health_log_nsid);
+
+ temp_cdw10 = ((sizeof(struct spdk_nvme_intel_smart_information_page) /
+ sizeof(uint32_t) - 1) << 16) |
+ SPDK_NVME_INTEL_LOG_SMART;
+ CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
+}
+
+static void verify_intel_temperature_log_page(struct nvme_request *req)
+{
+ uint32_t temp_cdw10;
+
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE);
+
+ temp_cdw10 = ((sizeof(struct spdk_nvme_intel_temperature_page) / sizeof(uint32_t) - 1) << 16) |
+ SPDK_NVME_INTEL_LOG_TEMPERATURE;
+ CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
+}
+
+static void verify_intel_read_latency_log_page(struct nvme_request *req)
+{
+ uint32_t temp_cdw10;
+
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE);
+
+ temp_cdw10 = ((sizeof(struct spdk_nvme_intel_rw_latency_page) / sizeof(uint32_t) - 1) << 16) |
+ SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY;
+ CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
+}
+
+static void verify_intel_write_latency_log_page(struct nvme_request *req)
+{
+ uint32_t temp_cdw10;
+
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE);
+
+ temp_cdw10 = ((sizeof(struct spdk_nvme_intel_rw_latency_page) / sizeof(uint32_t) - 1) << 16) |
+ SPDK_NVME_INTEL_LOG_WRITE_CMD_LATENCY;
+ CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
+}
+
+static void verify_intel_get_log_page_directory(struct nvme_request *req)
+{
+ uint32_t temp_cdw10;
+
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE);
+
+ temp_cdw10 = ((sizeof(struct spdk_nvme_intel_log_page_directory) / sizeof(uint32_t) - 1) << 16) |
+ SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY;
+ CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
+}
+
+static void verify_intel_marketing_description_log_page(struct nvme_request *req)
+{
+ uint32_t temp_cdw10;
+
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_GET_LOG_PAGE);
+
+ temp_cdw10 = ((sizeof(struct spdk_nvme_intel_marketing_description_page) / sizeof(
+ uint32_t) - 1) << 16) |
+ SPDK_NVME_INTEL_MARKETING_DESCRIPTION;
+ CU_ASSERT(req->cmd.cdw10 == temp_cdw10);
+}
+
+static void verify_namespace_attach(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_NS_ATTACHMENT);
+ CU_ASSERT(req->cmd.cdw10 == SPDK_NVME_NS_CTRLR_ATTACH);
+ CU_ASSERT(req->cmd.nsid == namespace_management_nsid);
+}
+
+static void verify_namespace_detach(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_NS_ATTACHMENT);
+ CU_ASSERT(req->cmd.cdw10 == SPDK_NVME_NS_CTRLR_DETACH);
+ CU_ASSERT(req->cmd.nsid == namespace_management_nsid);
+}
+
+static void verify_namespace_create(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_NS_MANAGEMENT);
+ CU_ASSERT(req->cmd.cdw10 == SPDK_NVME_NS_MANAGEMENT_CREATE);
+ CU_ASSERT(req->cmd.nsid == 0);
+}
+
+static void verify_namespace_delete(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_NS_MANAGEMENT);
+ CU_ASSERT(req->cmd.cdw10 == SPDK_NVME_NS_MANAGEMENT_DELETE);
+ CU_ASSERT(req->cmd.nsid == namespace_management_nsid);
+}
+
+static void verify_format_nvme(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_FORMAT_NVM);
+ CU_ASSERT(req->cmd.cdw10 == 0);
+ CU_ASSERT(req->cmd.nsid == format_nvme_nsid);
+}
+
+static void verify_fw_commit(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_FIRMWARE_COMMIT);
+ CU_ASSERT(req->cmd.cdw10 == 0x09);
+}
+
+static void verify_fw_image_download(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_NVME_OPC_FIRMWARE_IMAGE_DOWNLOAD);
+ CU_ASSERT(req->cmd.cdw10 == (fw_img_size >> 2) - 1);
+ CU_ASSERT(req->cmd.cdw11 == fw_img_offset >> 2);
+}
+
+struct nvme_request *
+nvme_allocate_request_user_copy(struct spdk_nvme_qpair *qpair, void *buffer, uint32_t payload_size,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg, bool host_to_controller)
+{
+ /* For the unit test, we don't actually need to copy the buffer */
+ return nvme_allocate_request_contig(qpair, buffer, payload_size, cb_fn, cb_arg);
+}
+
+int
+nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
+{
+ verify_fn(req);
+ /* stop analyzer from thinking stack variable addresses are stored in a global */
+ memset(req, 0, sizeof(*req));
+
+ return 0;
+}
+
+int
+nvme_ctrlr_submit_admin_request(struct spdk_nvme_ctrlr *ctrlr, struct nvme_request *req)
+{
+ verify_fn(req);
+ /* stop analyzer from thinking stack variable addresses are stored in a global */
+ memset(req, 0, sizeof(*req));
+
+ return 0;
+}
+
+#define DECLARE_AND_CONSTRUCT_CTRLR() \
+ struct spdk_nvme_ctrlr ctrlr = {}; \
+ struct spdk_nvme_qpair adminq = {}; \
+ struct nvme_request req; \
+ \
+ STAILQ_INIT(&adminq.free_req); \
+ STAILQ_INSERT_HEAD(&adminq.free_req, &req, stailq); \
+ ctrlr.adminq = &adminq;
+
+static void
+test_firmware_get_log_page(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_firmware_page payload = {};
+
+ verify_fn = verify_firmware_log_page;
+
+ spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_LOG_FIRMWARE_SLOT, SPDK_NVME_GLOBAL_NS_TAG,
+ &payload,
+ sizeof(payload), 0, NULL, NULL);
+}
+
+static void
+test_health_get_log_page(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_health_information_page payload = {};
+
+ verify_fn = verify_health_log_page;
+
+ spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_LOG_HEALTH_INFORMATION, health_log_nsid,
+ &payload,
+ sizeof(payload), 0, NULL, NULL);
+}
+
+static void
+test_error_get_log_page(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_error_information_entry payload = {};
+
+ ctrlr.cdata.elpe = CTRLR_CDATA_ELPE;
+
+ verify_fn = verify_error_log_page;
+
+ /* valid page */
+ error_num_entries = 1;
+ spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_LOG_ERROR, SPDK_NVME_GLOBAL_NS_TAG, &payload,
+ sizeof(payload), 0, NULL, NULL);
+}
+
+static void test_intel_smart_get_log_page(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_intel_smart_information_page payload = {};
+
+ verify_fn = verify_intel_smart_log_page;
+
+ spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_INTEL_LOG_SMART, health_log_nsid, &payload,
+ sizeof(payload), 0, NULL, NULL);
+}
+
+static void test_intel_temperature_get_log_page(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_intel_temperature_page payload = {};
+
+ verify_fn = verify_intel_temperature_log_page;
+
+ spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_INTEL_LOG_TEMPERATURE, SPDK_NVME_GLOBAL_NS_TAG,
+ &payload, sizeof(payload), 0, NULL, NULL);
+}
+
+static void test_intel_read_latency_get_log_page(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_intel_rw_latency_page payload = {};
+
+ verify_fn = verify_intel_read_latency_log_page;
+
+ spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_INTEL_LOG_READ_CMD_LATENCY,
+ SPDK_NVME_GLOBAL_NS_TAG,
+ &payload, sizeof(payload), 0, NULL, NULL);
+}
+
+static void test_intel_write_latency_get_log_page(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_intel_rw_latency_page payload = {};
+
+ verify_fn = verify_intel_write_latency_log_page;
+
+ spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_INTEL_LOG_WRITE_CMD_LATENCY,
+ SPDK_NVME_GLOBAL_NS_TAG,
+ &payload, sizeof(payload), 0, NULL, NULL);
+}
+
+static void test_intel_get_log_page_directory(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_intel_log_page_directory payload = {};
+
+ verify_fn = verify_intel_get_log_page_directory;
+
+ spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_INTEL_LOG_PAGE_DIRECTORY,
+ SPDK_NVME_GLOBAL_NS_TAG,
+ &payload, sizeof(payload), 0, NULL, NULL);
+}
+
+static void test_intel_marketing_description_get_log_page(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_intel_marketing_description_page payload = {};
+
+ verify_fn = verify_intel_marketing_description_log_page;
+
+ spdk_nvme_ctrlr_cmd_get_log_page(&ctrlr, SPDK_NVME_INTEL_MARKETING_DESCRIPTION,
+ SPDK_NVME_GLOBAL_NS_TAG,
+ &payload, sizeof(payload), 0, NULL, NULL);
+}
+
+static void test_generic_get_log_pages(void)
+{
+ test_error_get_log_page();
+ test_health_get_log_page();
+ test_firmware_get_log_page();
+}
+
+static void test_intel_get_log_pages(void)
+{
+ test_intel_get_log_page_directory();
+ test_intel_smart_get_log_page();
+ test_intel_temperature_get_log_page();
+ test_intel_read_latency_get_log_page();
+ test_intel_write_latency_get_log_page();
+ test_intel_marketing_description_get_log_page();
+}
+
+static void
+test_set_feature_cmd(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ verify_fn = verify_set_feature_cmd;
+
+ spdk_nvme_ctrlr_cmd_set_feature(&ctrlr, feature, feature_cdw11, feature_cdw12, NULL, 0, NULL, NULL);
+}
+
+static void
+test_get_feature_ns_cmd(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ verify_fn = verify_get_feature_ns_cmd;
+
+ spdk_nvme_ctrlr_cmd_get_feature_ns(&ctrlr, expected_feature_cdw10,
+ expected_feature_cdw11, NULL, 0,
+ NULL, NULL, expected_feature_ns);
+}
+
+static void
+test_set_feature_ns_cmd(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ verify_fn = verify_set_feature_ns_cmd;
+
+ spdk_nvme_ctrlr_cmd_set_feature_ns(&ctrlr, expected_feature_cdw10,
+ expected_feature_cdw11, expected_feature_cdw12,
+ NULL, 0, NULL, NULL, expected_feature_ns);
+}
+
+static void
+test_get_feature_cmd(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ verify_fn = verify_get_feature_cmd;
+
+ spdk_nvme_ctrlr_cmd_get_feature(&ctrlr, get_feature, get_feature_cdw11, NULL, 0, NULL, NULL);
+}
+
+static void
+test_abort_cmd(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_qpair qpair = {};
+
+ STAILQ_INIT(&ctrlr.queued_aborts);
+
+ verify_fn = verify_abort_cmd;
+
+ qpair.id = abort_sqid;
+ spdk_nvme_ctrlr_cmd_abort(&ctrlr, &qpair, abort_cid, NULL, NULL);
+}
+
+static void
+test_io_raw_cmd(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_qpair qpair = {};
+ struct spdk_nvme_cmd cmd = {};
+
+ verify_fn = verify_io_raw_cmd;
+
+ spdk_nvme_ctrlr_cmd_io_raw(&ctrlr, &qpair, &cmd, NULL, 1, NULL, NULL);
+}
+
+static void
+test_io_raw_cmd_with_md(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_qpair qpair = {};
+ struct spdk_nvme_cmd cmd = {};
+
+ verify_fn = verify_io_raw_cmd_with_md;
+
+ spdk_nvme_ctrlr_cmd_io_raw_with_md(&ctrlr, &qpair, &cmd, NULL, 1, NULL, NULL, NULL);
+}
+
+static void
+test_get_log_pages(void)
+{
+ test_generic_get_log_pages();
+ test_intel_get_log_pages();
+}
+
+static void
+test_namespace_attach(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_ctrlr_list payload = {};
+
+ verify_fn = verify_namespace_attach;
+
+ nvme_ctrlr_cmd_attach_ns(&ctrlr, namespace_management_nsid, &payload, NULL, NULL);
+}
+
+static void
+test_namespace_detach(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_ctrlr_list payload = {};
+
+ verify_fn = verify_namespace_detach;
+
+ nvme_ctrlr_cmd_detach_ns(&ctrlr, namespace_management_nsid, &payload, NULL, NULL);
+}
+
+static void
+test_namespace_create(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_ns_data payload = {};
+
+ verify_fn = verify_namespace_create;
+ nvme_ctrlr_cmd_create_ns(&ctrlr, &payload, NULL, NULL);
+}
+
+static void
+test_namespace_delete(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ verify_fn = verify_namespace_delete;
+ nvme_ctrlr_cmd_delete_ns(&ctrlr, namespace_management_nsid, NULL, NULL);
+}
+
+static void
+test_format_nvme(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_format format = {};
+
+ verify_fn = verify_format_nvme;
+
+ nvme_ctrlr_cmd_format(&ctrlr, format_nvme_nsid, &format, NULL, NULL);
+}
+
+static void
+test_fw_commit(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+ struct spdk_nvme_fw_commit fw_commit = {};
+
+ fw_commit.ca = SPDK_NVME_FW_COMMIT_REPLACE_AND_ENABLE_IMG;
+ fw_commit.fs = 1;
+
+ verify_fn = verify_fw_commit;
+
+ nvme_ctrlr_cmd_fw_commit(&ctrlr, &fw_commit, NULL, NULL);
+}
+
+static void
+test_fw_image_download(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ verify_fn = verify_fw_image_download;
+
+ nvme_ctrlr_cmd_fw_image_download(&ctrlr, fw_img_size, fw_img_offset, NULL,
+ NULL, NULL);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("nvme_ctrlr_cmd", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "test ctrlr cmd get_log_pages", test_get_log_pages) == NULL
+ || CU_add_test(suite, "test ctrlr cmd set_feature", test_set_feature_cmd) == NULL
+ || CU_add_test(suite, "test ctrlr cmd set_feature_ns", test_set_feature_ns_cmd) == NULL
+ || CU_add_test(suite, "test ctrlr cmd get_feature", test_get_feature_cmd) == NULL
+ || CU_add_test(suite, "test ctrlr cmd get_feature_ns", test_get_feature_ns_cmd) == NULL
+ || CU_add_test(suite, "test ctrlr cmd abort_cmd", test_abort_cmd) == NULL
+ || CU_add_test(suite, "test ctrlr cmd io_raw_cmd", test_io_raw_cmd) == NULL
+ || CU_add_test(suite, "test ctrlr cmd io_raw_cmd_with_md", test_io_raw_cmd_with_md) == NULL
+ || CU_add_test(suite, "test ctrlr cmd namespace_attach", test_namespace_attach) == NULL
+ || CU_add_test(suite, "test ctrlr cmd namespace_detach", test_namespace_detach) == NULL
+ || CU_add_test(suite, "test ctrlr cmd namespace_create", test_namespace_create) == NULL
+ || CU_add_test(suite, "test ctrlr cmd namespace_delete", test_namespace_delete) == NULL
+ || CU_add_test(suite, "test ctrlr cmd format_nvme", test_format_nvme) == NULL
+ || CU_add_test(suite, "test ctrlr cmd fw_commit", test_fw_commit) == NULL
+ || CU_add_test(suite, "test ctrlr cmd fw_image_download", test_fw_image_download) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/.gitignore
new file mode 100644
index 00000000..2813105d
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/.gitignore
@@ -0,0 +1 @@
+nvme_ctrlr_ocssd_cmd_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/Makefile
new file mode 100644
index 00000000..9446b8d5
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_ctrlr_ocssd_cmd_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/nvme_ctrlr_ocssd_cmd_ut.c b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/nvme_ctrlr_ocssd_cmd_ut.c
new file mode 100644
index 00000000..98eccf34
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ctrlr_ocssd_cmd.c/nvme_ctrlr_ocssd_cmd_ut.c
@@ -0,0 +1,116 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "nvme/nvme_ctrlr_ocssd_cmd.c"
+
+#define DECLARE_AND_CONSTRUCT_CTRLR() \
+ struct spdk_nvme_ctrlr ctrlr = {}; \
+ struct spdk_nvme_qpair adminq = {}; \
+ struct nvme_request req; \
+ \
+ STAILQ_INIT(&adminq.free_req); \
+ STAILQ_INSERT_HEAD(&adminq.free_req, &req, stailq); \
+ ctrlr.adminq = &adminq;
+
+pid_t g_spdk_nvme_pid;
+struct nvme_request g_req;
+typedef void (*verify_request_fn_t)(struct nvme_request *req);
+verify_request_fn_t verify_fn;
+
+static const uint32_t expected_geometry_ns = 1;
+
+int
+nvme_ctrlr_submit_admin_request(struct spdk_nvme_ctrlr *ctrlr, struct nvme_request *req)
+{
+ verify_fn(req);
+ memset(req, 0, sizeof(*req));
+ return 0;
+}
+
+struct nvme_request *
+nvme_allocate_request_user_copy(struct spdk_nvme_qpair *qpair, void *buffer, uint32_t payload_size,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg, bool host_to_controller)
+{
+ /* For the unit test, we don't actually need to copy the buffer */
+ return nvme_allocate_request_contig(qpair, buffer, payload_size, cb_fn, cb_arg);
+}
+
+static void verify_geometry_cmd(struct nvme_request *req)
+{
+ CU_ASSERT(req->cmd.opc == SPDK_OCSSD_OPC_GEOMETRY);
+ CU_ASSERT(req->cmd.nsid == expected_geometry_ns);
+}
+
+static void
+test_geometry_cmd(void)
+{
+ DECLARE_AND_CONSTRUCT_CTRLR();
+
+ struct spdk_ocssd_geometry_data geo;
+
+ verify_fn = verify_geometry_cmd;
+
+ spdk_nvme_ocssd_ctrlr_cmd_geometry(&ctrlr, expected_geometry_ns, &geo,
+ sizeof(geo), NULL, NULL);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("nvme_ctrlr_cmd", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "test ocssd ctrlr geometry cmd ", test_geometry_cmd) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ns.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_ns.c/.gitignore
new file mode 100644
index 00000000..ada0ec86
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ns.c/.gitignore
@@ -0,0 +1 @@
+nvme_ns_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ns.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_ns.c/Makefile
new file mode 100644
index 00000000..add85ee9
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ns.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_ns_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ns.c/nvme_ns_ut.c b/src/spdk/test/unit/lib/nvme/nvme_ns.c/nvme_ns_ut.c
new file mode 100644
index 00000000..cdfb4951
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ns.c/nvme_ns_ut.c
@@ -0,0 +1,163 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "spdk/env.h"
+
+#include "nvme/nvme_ns.c"
+
+#include "common/lib/test_env.c"
+
+SPDK_LOG_REGISTER_COMPONENT("nvme", SPDK_LOG_NVME)
+
+DEFINE_STUB(spdk_nvme_wait_for_completion_robust_lock, int,
+ (struct spdk_nvme_qpair *qpair,
+ struct nvme_completion_poll_status *status,
+ pthread_mutex_t *robust_mutex), 0);
+
+int
+nvme_ctrlr_cmd_identify(struct spdk_nvme_ctrlr *ctrlr, uint8_t cns, uint16_t cntid, uint32_t nsid,
+ void *payload, size_t payload_size,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ return -1;
+}
+
+void
+nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl)
+{
+}
+
+int32_t
+spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
+{
+ return -1;
+}
+
+static void
+test_nvme_ns_construct(void)
+{
+ struct spdk_nvme_ns ns = {};
+ uint32_t id = 1;
+ struct spdk_nvme_ctrlr ctrlr = {};
+
+ nvme_ns_construct(&ns, id, &ctrlr);
+ CU_ASSERT(ns.id == 1);
+}
+
+static void
+test_nvme_ns_uuid(void)
+{
+ struct spdk_nvme_ns ns = {};
+ const struct spdk_uuid *uuid;
+ struct spdk_uuid expected_uuid;
+
+ memset(&expected_uuid, 0xA5, sizeof(expected_uuid));
+
+ /* Empty list - no UUID should be found */
+ memset(ns.id_desc_list, 0, sizeof(ns.id_desc_list));
+ uuid = spdk_nvme_ns_get_uuid(&ns);
+ CU_ASSERT(uuid == NULL);
+
+ /* NGUID only (no UUID in list) */
+ memset(ns.id_desc_list, 0, sizeof(ns.id_desc_list));
+ ns.id_desc_list[0] = 0x02; /* NIDT == NGUID */
+ ns.id_desc_list[1] = 0x10; /* NIDL */
+ memset(&ns.id_desc_list[4], 0xCC, 0x10);
+ uuid = spdk_nvme_ns_get_uuid(&ns);
+ CU_ASSERT(uuid == NULL);
+
+ /* Just UUID in the list */
+ memset(ns.id_desc_list, 0, sizeof(ns.id_desc_list));
+ ns.id_desc_list[0] = 0x03; /* NIDT == UUID */
+ ns.id_desc_list[1] = 0x10; /* NIDL */
+ memcpy(&ns.id_desc_list[4], &expected_uuid, sizeof(expected_uuid));
+ uuid = spdk_nvme_ns_get_uuid(&ns);
+ SPDK_CU_ASSERT_FATAL(uuid != NULL);
+ CU_ASSERT(memcmp(uuid, &expected_uuid, sizeof(*uuid)) == 0);
+
+ /* UUID followed by NGUID */
+ memset(ns.id_desc_list, 0, sizeof(ns.id_desc_list));
+ ns.id_desc_list[0] = 0x03; /* NIDT == UUID */
+ ns.id_desc_list[1] = 0x10; /* NIDL */
+ memcpy(&ns.id_desc_list[4], &expected_uuid, sizeof(expected_uuid));
+ ns.id_desc_list[20] = 0x02; /* NIDT == NGUID */
+ ns.id_desc_list[21] = 0x10; /* NIDL */
+ memset(&ns.id_desc_list[24], 0xCC, 0x10);
+ uuid = spdk_nvme_ns_get_uuid(&ns);
+ SPDK_CU_ASSERT_FATAL(uuid != NULL);
+ CU_ASSERT(memcmp(uuid, &expected_uuid, sizeof(*uuid)) == 0);
+
+ /* NGUID followed by UUID */
+ memset(ns.id_desc_list, 0, sizeof(ns.id_desc_list));
+ ns.id_desc_list[0] = 0x02; /* NIDT == NGUID */
+ ns.id_desc_list[1] = 0x10; /* NIDL */
+ memset(&ns.id_desc_list[4], 0xCC, 0x10);
+ ns.id_desc_list[20] = 0x03; /* NIDT = UUID */
+ ns.id_desc_list[21] = 0x10; /* NIDL */
+ memcpy(&ns.id_desc_list[24], &expected_uuid, sizeof(expected_uuid));
+ uuid = spdk_nvme_ns_get_uuid(&ns);
+ SPDK_CU_ASSERT_FATAL(uuid != NULL);
+ CU_ASSERT(memcmp(uuid, &expected_uuid, sizeof(*uuid)) == 0);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("nvme", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "test_nvme_ns", test_nvme_ns_construct) == NULL ||
+ CU_add_test(suite, "test_nvme_ns_uuid", test_nvme_ns_uuid) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/.gitignore
new file mode 100644
index 00000000..5583ec23
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/.gitignore
@@ -0,0 +1 @@
+nvme_ns_cmd_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/Makefile
new file mode 100644
index 00000000..ff451d72
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_ns_cmd_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/nvme_ns_cmd_ut.c b/src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/nvme_ns_cmd_ut.c
new file mode 100644
index 00000000..f17ffa35
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ns_cmd.c/nvme_ns_cmd_ut.c
@@ -0,0 +1,1440 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "nvme/nvme_ns_cmd.c"
+#include "nvme/nvme.c"
+
+#include "common/lib/test_env.c"
+
+DEFINE_STUB(spdk_nvme_qpair_process_completions, int32_t,
+ (struct spdk_nvme_qpair *qpair,
+ uint32_t max_completions), 0);
+
+static struct nvme_driver _g_nvme_driver = {
+ .lock = PTHREAD_MUTEX_INITIALIZER,
+};
+
+static struct nvme_request *g_request = NULL;
+
+int
+spdk_pci_nvme_enumerate(spdk_pci_enum_cb enum_cb, void *enum_ctx)
+{
+ return -1;
+}
+
+static void nvme_request_reset_sgl(void *cb_arg, uint32_t sgl_offset)
+{
+}
+
+static int nvme_request_next_sge(void *cb_arg, void **address, uint32_t *length)
+{
+ uint32_t *lba_count = cb_arg;
+
+ /*
+ * We need to set address to something here, since the SGL splitting code will
+ * use it to determine PRP compatibility. Just use a rather arbitrary address
+ * for now - these tests will not actually cause data to be read from or written
+ * to this address.
+ */
+ *address = (void *)(uintptr_t)0x10000000;
+ *length = *lba_count;
+ return 0;
+}
+
+bool
+spdk_nvme_transport_available(enum spdk_nvme_transport_type trtype)
+{
+ return true;
+}
+
+struct spdk_nvme_ctrlr *nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
+ const struct spdk_nvme_ctrlr_opts *opts,
+ void *devhandle)
+{
+ return NULL;
+}
+
+void
+nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
+{
+}
+
+int
+nvme_ctrlr_add_process(struct spdk_nvme_ctrlr *ctrlr, void *devhandle)
+{
+ return 0;
+}
+
+int
+nvme_ctrlr_process_init(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return 0;
+}
+
+void
+nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove)
+{
+}
+
+struct spdk_pci_addr
+spdk_pci_device_get_addr(struct spdk_pci_device *pci_dev)
+{
+ struct spdk_pci_addr pci_addr;
+
+ memset(&pci_addr, 0, sizeof(pci_addr));
+ return pci_addr;
+}
+
+struct spdk_pci_id
+spdk_pci_device_get_id(struct spdk_pci_device *pci_dev)
+{
+ struct spdk_pci_id pci_id;
+
+ memset(&pci_id, 0xFF, sizeof(pci_id));
+
+ return pci_id;
+}
+
+void
+spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
+{
+ memset(opts, 0, sizeof(*opts));
+}
+
+uint32_t
+spdk_nvme_ns_get_sector_size(struct spdk_nvme_ns *ns)
+{
+ return ns->sector_size;
+}
+
+uint32_t
+spdk_nvme_ns_get_max_io_xfer_size(struct spdk_nvme_ns *ns)
+{
+ return ns->ctrlr->max_xfer_size;
+}
+
+int
+nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
+{
+ g_request = req;
+
+ return 0;
+}
+
+void
+nvme_ctrlr_proc_get_ref(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return;
+}
+
+void
+nvme_ctrlr_proc_put_ref(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return;
+}
+
+int
+nvme_ctrlr_get_ref_count(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return 0;
+}
+
+int
+nvme_transport_ctrlr_scan(const struct spdk_nvme_transport_id *trid,
+ void *cb_ctx,
+ spdk_nvme_probe_cb probe_cb,
+ spdk_nvme_remove_cb remove_cb,
+ bool direct_connect)
+{
+ return 0;
+}
+
+static void
+prepare_for_test(struct spdk_nvme_ns *ns, struct spdk_nvme_ctrlr *ctrlr,
+ struct spdk_nvme_qpair *qpair,
+ uint32_t sector_size, uint32_t md_size, uint32_t max_xfer_size,
+ uint32_t stripe_size, bool extended_lba)
+{
+ uint32_t num_requests = 32;
+ uint32_t i;
+
+ ctrlr->max_xfer_size = max_xfer_size;
+ /*
+ * Clear the flags field - we especially want to make sure the SGL_SUPPORTED flag is not set
+ * so that we test the SGL splitting path.
+ */
+ ctrlr->flags = 0;
+ ctrlr->min_page_size = 4096;
+ ctrlr->page_size = 4096;
+ memset(&ctrlr->opts, 0, sizeof(ctrlr->opts));
+ memset(ns, 0, sizeof(*ns));
+ ns->ctrlr = ctrlr;
+ ns->sector_size = sector_size;
+ ns->extended_lba_size = sector_size;
+ if (extended_lba) {
+ ns->flags |= SPDK_NVME_NS_EXTENDED_LBA_SUPPORTED;
+ ns->extended_lba_size += md_size;
+ }
+ ns->md_size = md_size;
+ ns->sectors_per_max_io = spdk_nvme_ns_get_max_io_xfer_size(ns) / ns->extended_lba_size;
+ ns->sectors_per_stripe = stripe_size / ns->extended_lba_size;
+
+ memset(qpair, 0, sizeof(*qpair));
+ qpair->ctrlr = ctrlr;
+ qpair->req_buf = calloc(num_requests, sizeof(struct nvme_request));
+ SPDK_CU_ASSERT_FATAL(qpair->req_buf != NULL);
+
+ for (i = 0; i < num_requests; i++) {
+ struct nvme_request *req = qpair->req_buf + i * sizeof(struct nvme_request);
+
+ STAILQ_INSERT_HEAD(&qpair->free_req, req, stailq);
+ }
+
+ g_request = NULL;
+}
+
+static void
+cleanup_after_test(struct spdk_nvme_qpair *qpair)
+{
+ free(qpair->req_buf);
+}
+
+static void
+nvme_cmd_interpret_rw(const struct spdk_nvme_cmd *cmd,
+ uint64_t *lba, uint32_t *num_blocks)
+{
+ *lba = *(const uint64_t *)&cmd->cdw10;
+ *num_blocks = (cmd->cdw12 & 0xFFFFu) + 1;
+}
+
+static void
+split_test(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_qpair qpair;
+ struct spdk_nvme_ctrlr ctrlr;
+ void *payload;
+ uint64_t lba, cmd_lba;
+ uint32_t lba_count, cmd_lba_count;
+ int rc;
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+ payload = malloc(512);
+ lba = 0;
+ lba_count = 1;
+
+ rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+
+ CU_ASSERT(g_request->num_children == 0);
+ nvme_cmd_interpret_rw(&g_request->cmd, &cmd_lba, &cmd_lba_count);
+ CU_ASSERT(cmd_lba == lba);
+ CU_ASSERT(cmd_lba_count == lba_count);
+
+ free(payload);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
+static void
+split_test2(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ struct nvme_request *child;
+ void *payload;
+ uint64_t lba, cmd_lba;
+ uint32_t lba_count, cmd_lba_count;
+ int rc;
+
+ /*
+ * Controller has max xfer of 128 KB (256 blocks).
+ * Submit an I/O of 256 KB starting at LBA 0, which should be split
+ * on the max I/O boundary into two I/Os of 128 KB.
+ */
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+ payload = malloc(256 * 1024);
+ lba = 0;
+ lba_count = (256 * 1024) / 512;
+
+ rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+
+ CU_ASSERT(g_request->num_children == 2);
+
+ child = TAILQ_FIRST(&g_request->children);
+ nvme_request_remove_child(g_request, child);
+ nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
+ CU_ASSERT(child->num_children == 0);
+ CU_ASSERT(child->payload_size == 128 * 1024);
+ CU_ASSERT(cmd_lba == 0);
+ CU_ASSERT(cmd_lba_count == 256); /* 256 * 512 byte blocks = 128 KB */
+ nvme_free_request(child);
+
+ child = TAILQ_FIRST(&g_request->children);
+ nvme_request_remove_child(g_request, child);
+ nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
+ CU_ASSERT(child->num_children == 0);
+ CU_ASSERT(child->payload_size == 128 * 1024);
+ CU_ASSERT(cmd_lba == 256);
+ CU_ASSERT(cmd_lba_count == 256);
+ nvme_free_request(child);
+
+ CU_ASSERT(TAILQ_EMPTY(&g_request->children));
+
+ free(payload);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
+static void
+split_test3(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ struct nvme_request *child;
+ void *payload;
+ uint64_t lba, cmd_lba;
+ uint32_t lba_count, cmd_lba_count;
+ int rc;
+
+ /*
+ * Controller has max xfer of 128 KB (256 blocks).
+ * Submit an I/O of 256 KB starting at LBA 10, which should be split
+ * into two I/Os:
+ * 1) LBA = 10, count = 256 blocks
+ * 2) LBA = 266, count = 256 blocks
+ */
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+ payload = malloc(256 * 1024);
+ lba = 10; /* Start at an LBA that isn't aligned to the stripe size */
+ lba_count = (256 * 1024) / 512;
+
+ rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+
+ child = TAILQ_FIRST(&g_request->children);
+ nvme_request_remove_child(g_request, child);
+ nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
+ CU_ASSERT(child->num_children == 0);
+ CU_ASSERT(child->payload_size == 128 * 1024);
+ CU_ASSERT(cmd_lba == 10);
+ CU_ASSERT(cmd_lba_count == 256);
+ nvme_free_request(child);
+
+ child = TAILQ_FIRST(&g_request->children);
+ nvme_request_remove_child(g_request, child);
+ nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
+ CU_ASSERT(child->num_children == 0);
+ CU_ASSERT(child->payload_size == 128 * 1024);
+ CU_ASSERT(cmd_lba == 266);
+ CU_ASSERT(cmd_lba_count == 256);
+ nvme_free_request(child);
+
+ CU_ASSERT(TAILQ_EMPTY(&g_request->children));
+
+ free(payload);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
+static void
+split_test4(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ struct nvme_request *child;
+ void *payload;
+ uint64_t lba, cmd_lba;
+ uint32_t lba_count, cmd_lba_count;
+ int rc;
+
+ /*
+ * Controller has max xfer of 128 KB (256 blocks) and a stripe size of 128 KB.
+ * (Same as split_test3 except with driver-assisted striping enabled.)
+ * Submit an I/O of 256 KB starting at LBA 10, which should be split
+ * into three I/Os:
+ * 1) LBA = 10, count = 246 blocks (less than max I/O size to align to stripe size)
+ * 2) LBA = 256, count = 256 blocks (aligned to stripe size and max I/O size)
+ * 3) LBA = 512, count = 10 blocks (finish off the remaining I/O size)
+ */
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 128 * 1024, false);
+ payload = malloc(256 * 1024);
+ lba = 10; /* Start at an LBA that isn't aligned to the stripe size */
+ lba_count = (256 * 1024) / 512;
+
+ rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL,
+ SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 3);
+
+ child = TAILQ_FIRST(&g_request->children);
+ nvme_request_remove_child(g_request, child);
+ nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
+ CU_ASSERT(child->num_children == 0);
+ CU_ASSERT(child->payload_size == (256 - 10) * 512);
+ CU_ASSERT(child->payload_offset == 0);
+ CU_ASSERT(cmd_lba == 10);
+ CU_ASSERT(cmd_lba_count == 256 - 10);
+ CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS) != 0);
+ CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_LIMITED_RETRY) == 0);
+ nvme_free_request(child);
+
+ child = TAILQ_FIRST(&g_request->children);
+ nvme_request_remove_child(g_request, child);
+ nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
+ CU_ASSERT(child->num_children == 0);
+ CU_ASSERT(child->payload_size == 128 * 1024);
+ CU_ASSERT(child->payload_offset == (256 - 10) * 512);
+ CU_ASSERT(cmd_lba == 256);
+ CU_ASSERT(cmd_lba_count == 256);
+ CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS) != 0);
+ CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_LIMITED_RETRY) == 0);
+ nvme_free_request(child);
+
+ child = TAILQ_FIRST(&g_request->children);
+ nvme_request_remove_child(g_request, child);
+ nvme_cmd_interpret_rw(&child->cmd, &cmd_lba, &cmd_lba_count);
+ CU_ASSERT(child->num_children == 0);
+ CU_ASSERT(child->payload_size == 10 * 512);
+ CU_ASSERT(child->payload_offset == (512 - 10) * 512);
+ CU_ASSERT(cmd_lba == 512);
+ CU_ASSERT(cmd_lba_count == 10);
+ CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS) != 0);
+ CU_ASSERT((child->cmd.cdw12 & SPDK_NVME_IO_FLAGS_LIMITED_RETRY) == 0);
+ nvme_free_request(child);
+
+ CU_ASSERT(TAILQ_EMPTY(&g_request->children));
+
+ free(payload);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_cmd_child_request(void)
+{
+
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ int rc = 0;
+ struct nvme_request *child, *tmp;
+ void *payload;
+ uint64_t lba = 0x1000;
+ uint32_t i = 0;
+ uint32_t offset = 0;
+ uint32_t sector_size = 512;
+ uint32_t max_io_size = 128 * 1024;
+ uint32_t sectors_per_max_io = max_io_size / sector_size;
+
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_io_size, 0, false);
+
+ payload = malloc(128 * 1024);
+ rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, sectors_per_max_io, NULL, NULL, 0);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->payload_offset == 0);
+ CU_ASSERT(g_request->num_children == 0);
+ nvme_free_request(g_request);
+
+ rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, sectors_per_max_io - 1, NULL, NULL, 0);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->payload_offset == 0);
+ CU_ASSERT(g_request->num_children == 0);
+ nvme_free_request(g_request);
+
+ rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, sectors_per_max_io * 4, NULL, NULL, 0);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->num_children == 4);
+
+ rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, (DEFAULT_IO_QUEUE_REQUESTS + 1) * sector_size,
+ NULL,
+ NULL, 0);
+ SPDK_CU_ASSERT_FATAL(rc == -EINVAL);
+
+ TAILQ_FOREACH_SAFE(child, &g_request->children, child_tailq, tmp) {
+ nvme_request_remove_child(g_request, child);
+ CU_ASSERT(child->payload_offset == offset);
+ CU_ASSERT(child->cmd.opc == SPDK_NVME_OPC_READ);
+ CU_ASSERT(child->cmd.nsid == ns.id);
+ CU_ASSERT(child->cmd.cdw10 == (lba + sectors_per_max_io * i));
+ CU_ASSERT(child->cmd.cdw12 == ((sectors_per_max_io - 1) | 0));
+ offset += max_io_size;
+ nvme_free_request(child);
+ i++;
+ }
+
+ free(payload);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ns_cmd_flush(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ spdk_nvme_cmd_cb cb_fn = NULL;
+ void *cb_arg = NULL;
+ int rc;
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+
+ rc = spdk_nvme_ns_cmd_flush(&ns, &qpair, cb_fn, cb_arg);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_FLUSH);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ns_cmd_write_zeroes(void)
+{
+ struct spdk_nvme_ns ns = { 0 };
+ struct spdk_nvme_ctrlr ctrlr = { 0 };
+ struct spdk_nvme_qpair qpair;
+ spdk_nvme_cmd_cb cb_fn = NULL;
+ void *cb_arg = NULL;
+ uint64_t cmd_lba;
+ uint32_t cmd_lba_count;
+ int rc;
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+
+ rc = spdk_nvme_ns_cmd_write_zeroes(&ns, &qpair, 0, 2, cb_fn, cb_arg, 0);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_WRITE_ZEROES);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ nvme_cmd_interpret_rw(&g_request->cmd, &cmd_lba, &cmd_lba_count);
+ CU_ASSERT_EQUAL(cmd_lba, 0);
+ CU_ASSERT_EQUAL(cmd_lba_count, 2);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ns_cmd_dataset_management(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ spdk_nvme_cmd_cb cb_fn = NULL;
+ void *cb_arg = NULL;
+ struct spdk_nvme_dsm_range ranges[256];
+ uint16_t i;
+ int rc = 0;
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+
+ for (i = 0; i < 256; i++) {
+ ranges[i].starting_lba = i;
+ ranges[i].length = 1;
+ ranges[i].attributes.raw = 0;
+ }
+
+ /* TRIM one LBA */
+ rc = spdk_nvme_ns_cmd_dataset_management(&ns, &qpair, SPDK_NVME_DSM_ATTR_DEALLOCATE,
+ ranges, 1, cb_fn, cb_arg);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_DATASET_MANAGEMENT);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ CU_ASSERT(g_request->cmd.cdw10 == 0);
+ CU_ASSERT(g_request->cmd.cdw11 == SPDK_NVME_DSM_ATTR_DEALLOCATE);
+ spdk_dma_free(g_request->payload.contig_or_cb_arg);
+ nvme_free_request(g_request);
+
+ /* TRIM 256 LBAs */
+ rc = spdk_nvme_ns_cmd_dataset_management(&ns, &qpair, SPDK_NVME_DSM_ATTR_DEALLOCATE,
+ ranges, 256, cb_fn, cb_arg);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_DATASET_MANAGEMENT);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ CU_ASSERT(g_request->cmd.cdw10 == 255u);
+ CU_ASSERT(g_request->cmd.cdw11 == SPDK_NVME_DSM_ATTR_DEALLOCATE);
+ spdk_dma_free(g_request->payload.contig_or_cb_arg);
+ nvme_free_request(g_request);
+
+ rc = spdk_nvme_ns_cmd_dataset_management(&ns, &qpair, SPDK_NVME_DSM_ATTR_DEALLOCATE,
+ NULL, 0, cb_fn, cb_arg);
+ CU_ASSERT(rc != 0);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ns_cmd_readv(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ int rc = 0;
+ void *cb_arg;
+ uint32_t lba_count = 256;
+ uint32_t sector_size = 512;
+ uint64_t sge_length = lba_count * sector_size;
+
+ cb_arg = malloc(512);
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, 128 * 1024, 0, false);
+ rc = spdk_nvme_ns_cmd_readv(&ns, &qpair, 0x1000, lba_count, NULL, &sge_length, 0,
+ nvme_request_reset_sgl, nvme_request_next_sge);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_READ);
+ CU_ASSERT(nvme_payload_type(&g_request->payload) == NVME_PAYLOAD_TYPE_SGL);
+ CU_ASSERT(g_request->payload.reset_sgl_fn == nvme_request_reset_sgl);
+ CU_ASSERT(g_request->payload.next_sge_fn == nvme_request_next_sge);
+ CU_ASSERT(g_request->payload.contig_or_cb_arg == &sge_length);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+
+ rc = spdk_nvme_ns_cmd_readv(&ns, &qpair, 0x1000, 256, NULL, cb_arg, 0, nvme_request_reset_sgl,
+ NULL);
+ CU_ASSERT(rc != 0);
+
+ free(cb_arg);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ns_cmd_writev(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ int rc = 0;
+ void *cb_arg;
+ uint32_t lba_count = 256;
+ uint32_t sector_size = 512;
+ uint64_t sge_length = lba_count * sector_size;
+
+ cb_arg = malloc(512);
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, 128 * 1024, 0, false);
+ rc = spdk_nvme_ns_cmd_writev(&ns, &qpair, 0x1000, lba_count, NULL, &sge_length, 0,
+ nvme_request_reset_sgl, nvme_request_next_sge);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_WRITE);
+ CU_ASSERT(nvme_payload_type(&g_request->payload) == NVME_PAYLOAD_TYPE_SGL);
+ CU_ASSERT(g_request->payload.reset_sgl_fn == nvme_request_reset_sgl);
+ CU_ASSERT(g_request->payload.next_sge_fn == nvme_request_next_sge);
+ CU_ASSERT(g_request->payload.contig_or_cb_arg == &sge_length);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+
+ rc = spdk_nvme_ns_cmd_writev(&ns, &qpair, 0x1000, 256, NULL, cb_arg, 0,
+ NULL, nvme_request_next_sge);
+ CU_ASSERT(rc != 0);
+
+ free(cb_arg);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ns_cmd_comparev(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ int rc = 0;
+ void *cb_arg;
+ uint32_t lba_count = 256;
+ uint32_t sector_size = 512;
+ uint64_t sge_length = lba_count * sector_size;
+
+ cb_arg = malloc(512);
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, 128 * 1024, 0, false);
+ rc = spdk_nvme_ns_cmd_comparev(&ns, &qpair, 0x1000, lba_count, NULL, &sge_length, 0,
+ nvme_request_reset_sgl, nvme_request_next_sge);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_COMPARE);
+ CU_ASSERT(nvme_payload_type(&g_request->payload) == NVME_PAYLOAD_TYPE_SGL);
+ CU_ASSERT(g_request->payload.reset_sgl_fn == nvme_request_reset_sgl);
+ CU_ASSERT(g_request->payload.next_sge_fn == nvme_request_next_sge);
+ CU_ASSERT(g_request->payload.contig_or_cb_arg == &sge_length);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+
+ rc = spdk_nvme_ns_cmd_comparev(&ns, &qpair, 0x1000, 256, NULL, cb_arg, 0,
+ nvme_request_reset_sgl, NULL);
+ CU_ASSERT(rc != 0);
+
+ free(cb_arg);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_io_flags(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ void *payload;
+ uint64_t lba;
+ uint32_t lba_count;
+ int rc;
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 128 * 1024, false);
+ payload = malloc(256 * 1024);
+ lba = 0;
+ lba_count = (4 * 1024) / 512;
+
+ rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL,
+ SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT((g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS) != 0);
+ CU_ASSERT((g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_LIMITED_RETRY) == 0);
+ nvme_free_request(g_request);
+
+ rc = spdk_nvme_ns_cmd_read(&ns, &qpair, payload, lba, lba_count, NULL, NULL,
+ SPDK_NVME_IO_FLAGS_LIMITED_RETRY);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT((g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS) == 0);
+ CU_ASSERT((g_request->cmd.cdw12 & SPDK_NVME_IO_FLAGS_LIMITED_RETRY) != 0);
+ nvme_free_request(g_request);
+
+ free(payload);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ns_cmd_reservation_register(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ struct spdk_nvme_reservation_register_data *payload;
+ bool ignore_key = 1;
+ spdk_nvme_cmd_cb cb_fn = NULL;
+ void *cb_arg = NULL;
+ int rc = 0;
+ uint32_t tmp_cdw10;
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+ payload = malloc(sizeof(struct spdk_nvme_reservation_register_data));
+
+ rc = spdk_nvme_ns_cmd_reservation_register(&ns, &qpair, payload, ignore_key,
+ SPDK_NVME_RESERVE_REGISTER_KEY,
+ SPDK_NVME_RESERVE_PTPL_NO_CHANGES,
+ cb_fn, cb_arg);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_RESERVATION_REGISTER);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+
+ tmp_cdw10 = SPDK_NVME_RESERVE_REGISTER_KEY;
+ tmp_cdw10 |= ignore_key ? 1 << 3 : 0;
+ tmp_cdw10 |= (uint32_t)SPDK_NVME_RESERVE_PTPL_NO_CHANGES << 30;
+
+ CU_ASSERT(g_request->cmd.cdw10 == tmp_cdw10);
+
+ spdk_dma_free(g_request->payload.contig_or_cb_arg);
+ nvme_free_request(g_request);
+ free(payload);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ns_cmd_reservation_release(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ struct spdk_nvme_reservation_key_data *payload;
+ bool ignore_key = 1;
+ spdk_nvme_cmd_cb cb_fn = NULL;
+ void *cb_arg = NULL;
+ int rc = 0;
+ uint32_t tmp_cdw10;
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+ payload = malloc(sizeof(struct spdk_nvme_reservation_key_data));
+
+ rc = spdk_nvme_ns_cmd_reservation_release(&ns, &qpair, payload, ignore_key,
+ SPDK_NVME_RESERVE_RELEASE,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE,
+ cb_fn, cb_arg);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_RESERVATION_RELEASE);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+
+ tmp_cdw10 = SPDK_NVME_RESERVE_RELEASE;
+ tmp_cdw10 |= ignore_key ? 1 << 3 : 0;
+ tmp_cdw10 |= (uint32_t)SPDK_NVME_RESERVE_WRITE_EXCLUSIVE << 8;
+
+ CU_ASSERT(g_request->cmd.cdw10 == tmp_cdw10);
+
+ spdk_dma_free(g_request->payload.contig_or_cb_arg);
+ nvme_free_request(g_request);
+ free(payload);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ns_cmd_reservation_acquire(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ struct spdk_nvme_reservation_acquire_data *payload;
+ bool ignore_key = 1;
+ spdk_nvme_cmd_cb cb_fn = NULL;
+ void *cb_arg = NULL;
+ int rc = 0;
+ uint32_t tmp_cdw10;
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+ payload = malloc(sizeof(struct spdk_nvme_reservation_acquire_data));
+
+ rc = spdk_nvme_ns_cmd_reservation_acquire(&ns, &qpair, payload, ignore_key,
+ SPDK_NVME_RESERVE_ACQUIRE,
+ SPDK_NVME_RESERVE_WRITE_EXCLUSIVE,
+ cb_fn, cb_arg);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_RESERVATION_ACQUIRE);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+
+ tmp_cdw10 = SPDK_NVME_RESERVE_ACQUIRE;
+ tmp_cdw10 |= ignore_key ? 1 << 3 : 0;
+ tmp_cdw10 |= (uint32_t)SPDK_NVME_RESERVE_WRITE_EXCLUSIVE << 8;
+
+ CU_ASSERT(g_request->cmd.cdw10 == tmp_cdw10);
+
+ spdk_dma_free(g_request->payload.contig_or_cb_arg);
+ nvme_free_request(g_request);
+ free(payload);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ns_cmd_reservation_report(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ struct spdk_nvme_reservation_status_data *payload;
+ spdk_nvme_cmd_cb cb_fn = NULL;
+ void *cb_arg = NULL;
+ int rc = 0;
+ uint32_t size = sizeof(struct spdk_nvme_reservation_status_data);
+
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 0, 128 * 1024, 0, false);
+
+ payload = calloc(1, size);
+ SPDK_CU_ASSERT_FATAL(payload != NULL);
+
+ rc = spdk_nvme_ns_cmd_reservation_report(&ns, &qpair, payload, size, cb_fn, cb_arg);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ CU_ASSERT(g_request->cmd.opc == SPDK_NVME_OPC_RESERVATION_REPORT);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+
+ CU_ASSERT(g_request->cmd.cdw10 == (size / 4));
+
+ spdk_dma_free(g_request->payload.contig_or_cb_arg);
+ nvme_free_request(g_request);
+ free(payload);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ns_cmd_write_with_md(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ int rc = 0;
+ char *buffer = NULL;
+ char *metadata = NULL;
+ uint32_t block_size, md_size;
+ struct nvme_request *child0, *child1;
+
+ block_size = 512;
+ md_size = 128;
+
+ buffer = malloc((block_size + md_size) * 384);
+ SPDK_CU_ASSERT_FATAL(buffer != NULL);
+ metadata = malloc(md_size * 384);
+ SPDK_CU_ASSERT_FATAL(metadata != NULL);
+
+ /*
+ * 512 byte data + 128 byte metadata
+ * Separate metadata buffer
+ * Max data transfer size 128 KB
+ * No stripe size
+ *
+ * 256 blocks * 512 bytes per block = single 128 KB I/O (no splitting required)
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, false);
+
+ rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, metadata, 0x1000, 256, NULL, NULL, 0, 0,
+ 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->payload.md == metadata);
+ CU_ASSERT(g_request->payload_size == 256 * 512);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 128 byte metadata
+ * Extended LBA
+ * Max data transfer size 128 KB
+ * No stripe size
+ *
+ * 256 blocks * (512 + 128) bytes per block = two I/Os:
+ * child 0: 204 blocks - 204 * (512 + 128) = 127.5 KB
+ * child 1: 52 blocks
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, true);
+
+ rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256, NULL, NULL, 0, 0,
+ 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+ child0 = TAILQ_FIRST(&g_request->children);
+
+ SPDK_CU_ASSERT_FATAL(child0 != NULL);
+ CU_ASSERT(child0->payload.md == NULL);
+ CU_ASSERT(child0->payload_offset == 0);
+ CU_ASSERT(child0->payload_size == 204 * (512 + 128));
+ child1 = TAILQ_NEXT(child0, child_tailq);
+
+ SPDK_CU_ASSERT_FATAL(child1 != NULL);
+ CU_ASSERT(child1->payload.md == NULL);
+ CU_ASSERT(child1->payload_offset == 204 * (512 + 128));
+ CU_ASSERT(child1->payload_size == 52 * (512 + 128));
+
+ nvme_request_free_children(g_request);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 8 byte metadata
+ * Extended LBA
+ * Max data transfer size 128 KB
+ * No stripe size
+ * No protection information
+ *
+ * 256 blocks * (512 + 8) bytes per block = two I/Os:
+ * child 0: 252 blocks - 252 * (512 + 8) = 127.96875 KB
+ * child 1: 4 blocks
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, true);
+
+ rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256, NULL, NULL, 0, 0,
+ 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+ child0 = TAILQ_FIRST(&g_request->children);
+
+ SPDK_CU_ASSERT_FATAL(child0 != NULL);
+ CU_ASSERT(child0->payload.md == NULL);
+ CU_ASSERT(child0->payload_offset == 0);
+ CU_ASSERT(child0->payload_size == 252 * (512 + 8));
+ child1 = TAILQ_NEXT(child0, child_tailq);
+
+ SPDK_CU_ASSERT_FATAL(child1 != NULL);
+ CU_ASSERT(child1->payload.md == NULL);
+ CU_ASSERT(child1->payload_offset == 252 * (512 + 8));
+ CU_ASSERT(child1->payload_size == 4 * (512 + 8));
+
+ nvme_request_free_children(g_request);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 8 byte metadata
+ * Extended LBA
+ * Max data transfer size 128 KB
+ * No stripe size
+ * Protection information enabled + PRACT
+ *
+ * Special case for 8-byte metadata + PI + PRACT: no metadata transferred
+ * In theory, 256 blocks * 512 bytes per block = one I/O (128 KB)
+ * However, the splitting code does not account for PRACT when calculating
+ * max sectors per transfer, so we actually get two I/Os:
+ * child 0: 252 blocks
+ * child 1: 4 blocks
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, true);
+ ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;
+
+ rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256, NULL, NULL,
+ SPDK_NVME_IO_FLAGS_PRACT, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+ child0 = TAILQ_FIRST(&g_request->children);
+
+ SPDK_CU_ASSERT_FATAL(child0 != NULL);
+ CU_ASSERT(child0->payload_offset == 0);
+ CU_ASSERT(child0->payload_size == 252 * 512); /* NOTE: does not include metadata! */
+ child1 = TAILQ_NEXT(child0, child_tailq);
+
+ SPDK_CU_ASSERT_FATAL(child1 != NULL);
+ CU_ASSERT(child1->payload.md == NULL);
+ CU_ASSERT(child1->payload_offset == 252 * 512);
+ CU_ASSERT(child1->payload_size == 4 * 512);
+
+ nvme_request_free_children(g_request);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 8 byte metadata
+ * Separate metadata buffer
+ * Max data transfer size 128 KB
+ * No stripe size
+ * Protection information enabled + PRACT
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, false);
+ ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;
+
+ rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, metadata, 0x1000, 256, NULL, NULL,
+ SPDK_NVME_IO_FLAGS_PRACT, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->payload.md == metadata);
+ CU_ASSERT(g_request->payload_size == 256 * 512);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 8 byte metadata
+ * Separate metadata buffer
+ * Max data transfer size 128 KB
+ * No stripe size
+ * Protection information enabled + PRACT
+ *
+ * 384 blocks * 512 bytes = two I/Os:
+ * child 0: 256 blocks
+ * child 1: 128 blocks
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, false);
+ ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;
+
+ rc = spdk_nvme_ns_cmd_write_with_md(&ns, &qpair, buffer, metadata, 0x1000, 384, NULL, NULL,
+ SPDK_NVME_IO_FLAGS_PRACT, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+ child0 = TAILQ_FIRST(&g_request->children);
+
+ SPDK_CU_ASSERT_FATAL(child0 != NULL);
+ CU_ASSERT(child0->payload_offset == 0);
+ CU_ASSERT(child0->payload_size == 256 * 512);
+ CU_ASSERT(child0->md_offset == 0);
+ child1 = TAILQ_NEXT(child0, child_tailq);
+
+ SPDK_CU_ASSERT_FATAL(child1 != NULL);
+ CU_ASSERT(child1->payload_offset == 256 * 512);
+ CU_ASSERT(child1->payload_size == 128 * 512);
+ CU_ASSERT(child1->md_offset == 256 * 8);
+
+ nvme_request_free_children(g_request);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ free(buffer);
+ free(metadata);
+}
+
+static void
+test_nvme_ns_cmd_read_with_md(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ int rc = 0;
+ char *buffer = NULL;
+ char *metadata = NULL;
+ uint32_t block_size, md_size;
+
+ block_size = 512;
+ md_size = 128;
+
+ buffer = malloc(block_size * 256);
+ SPDK_CU_ASSERT_FATAL(buffer != NULL);
+ metadata = malloc(md_size * 256);
+ SPDK_CU_ASSERT_FATAL(metadata != NULL);
+
+ /*
+ * 512 byte data + 128 byte metadata
+ * Separate metadata buffer
+ * Max data transfer size 128 KB
+ * No stripe size
+ *
+ * 256 blocks * 512 bytes per block = single 128 KB I/O (no splitting required)
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, false);
+
+ rc = spdk_nvme_ns_cmd_read_with_md(&ns, &qpair, buffer, metadata, 0x1000, 256, NULL, NULL, 0, 0,
+ 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->payload.md == metadata);
+ CU_ASSERT(g_request->payload_size == 256 * 512);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+ free(buffer);
+ free(metadata);
+}
+
+static void
+test_nvme_ns_cmd_compare_with_md(void)
+{
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+ int rc = 0;
+ char *buffer = NULL;
+ char *metadata = NULL;
+ uint32_t block_size, md_size;
+ struct nvme_request *child0, *child1;
+
+ block_size = 512;
+ md_size = 128;
+
+ buffer = malloc((block_size + md_size) * 384);
+ SPDK_CU_ASSERT_FATAL(buffer != NULL);
+ metadata = malloc(md_size * 384);
+ SPDK_CU_ASSERT_FATAL(metadata != NULL);
+
+ /*
+ * 512 byte data + 128 byte metadata
+ * Separate metadata buffer
+ * Max data transfer size 128 KB
+ * No stripe size
+ *
+ * 256 blocks * 512 bytes per block = single 128 KB I/O (no splitting required)
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, false);
+
+ rc = spdk_nvme_ns_cmd_compare_with_md(&ns, &qpair, buffer, metadata, 0x1000, 256,
+ NULL, NULL, 0, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->payload.md == metadata);
+ CU_ASSERT(g_request->payload_size == 256 * 512);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 128 byte metadata
+ * Extended LBA
+ * Max data transfer size 128 KB
+ * No stripe size
+ *
+ * 256 blocks * (512 + 128) bytes per block = two I/Os:
+ * child 0: 204 blocks - 204 * (512 + 128) = 127.5 KB
+ * child 1: 52 blocks
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 128, 128 * 1024, 0, true);
+
+ rc = spdk_nvme_ns_cmd_compare_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256,
+ NULL, NULL, 0, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+ child0 = TAILQ_FIRST(&g_request->children);
+
+ SPDK_CU_ASSERT_FATAL(child0 != NULL);
+ CU_ASSERT(child0->payload.md == NULL);
+ CU_ASSERT(child0->payload_offset == 0);
+ CU_ASSERT(child0->payload_size == 204 * (512 + 128));
+ child1 = TAILQ_NEXT(child0, child_tailq);
+
+ SPDK_CU_ASSERT_FATAL(child1 != NULL);
+ CU_ASSERT(child1->payload.md == NULL);
+ CU_ASSERT(child1->payload_offset == 204 * (512 + 128));
+ CU_ASSERT(child1->payload_size == 52 * (512 + 128));
+
+ nvme_request_free_children(g_request);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 8 byte metadata
+ * Extended LBA
+ * Max data transfer size 128 KB
+ * No stripe size
+ * No protection information
+ *
+ * 256 blocks * (512 + 8) bytes per block = two I/Os:
+ * child 0: 252 blocks - 252 * (512 + 8) = 127.96875 KB
+ * child 1: 4 blocks
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, true);
+
+ rc = spdk_nvme_ns_cmd_compare_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256,
+ NULL, NULL, 0, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+ child0 = TAILQ_FIRST(&g_request->children);
+
+ SPDK_CU_ASSERT_FATAL(child0 != NULL);
+ CU_ASSERT(child0->payload.md == NULL);
+ CU_ASSERT(child0->payload_offset == 0);
+ CU_ASSERT(child0->payload_size == 252 * (512 + 8));
+ child1 = TAILQ_NEXT(child0, child_tailq);
+
+ SPDK_CU_ASSERT_FATAL(child1 != NULL);
+ CU_ASSERT(child1->payload.md == NULL);
+ CU_ASSERT(child1->payload_offset == 252 * (512 + 8));
+ CU_ASSERT(child1->payload_size == 4 * (512 + 8));
+
+ nvme_request_free_children(g_request);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 8 byte metadata
+ * Extended LBA
+ * Max data transfer size 128 KB
+ * No stripe size
+ * Protection information enabled + PRACT
+ *
+ * Special case for 8-byte metadata + PI + PRACT: no metadata transferred
+ * In theory, 256 blocks * 512 bytes per block = one I/O (128 KB)
+ * However, the splitting code does not account for PRACT when calculating
+ * max sectors per transfer, so we actually get two I/Os:
+ * child 0: 252 blocks
+ * child 1: 4 blocks
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, true);
+ ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;
+
+ rc = spdk_nvme_ns_cmd_compare_with_md(&ns, &qpair, buffer, NULL, 0x1000, 256,
+ NULL, NULL, SPDK_NVME_IO_FLAGS_PRACT, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+ child0 = TAILQ_FIRST(&g_request->children);
+
+ SPDK_CU_ASSERT_FATAL(child0 != NULL);
+ CU_ASSERT(child0->payload_offset == 0);
+ CU_ASSERT(child0->payload_size == 252 * 512); /* NOTE: does not include metadata! */
+ child1 = TAILQ_NEXT(child0, child_tailq);
+
+ SPDK_CU_ASSERT_FATAL(child1 != NULL);
+ CU_ASSERT(child1->payload.md == NULL);
+ CU_ASSERT(child1->payload_offset == 252 * 512);
+ CU_ASSERT(child1->payload_size == 4 * 512);
+
+ nvme_request_free_children(g_request);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 8 byte metadata
+ * Separate metadata buffer
+ * Max data transfer size 128 KB
+ * No stripe size
+ * Protection information enabled + PRACT
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, false);
+ ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;
+
+ rc = spdk_nvme_ns_cmd_compare_with_md(&ns, &qpair, buffer, metadata, 0x1000, 256,
+ NULL, NULL, SPDK_NVME_IO_FLAGS_PRACT, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->payload.md == metadata);
+ CU_ASSERT(g_request->payload_size == 256 * 512);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ /*
+ * 512 byte data + 8 byte metadata
+ * Separate metadata buffer
+ * Max data transfer size 128 KB
+ * No stripe size
+ * Protection information enabled + PRACT
+ *
+ * 384 blocks * 512 bytes = two I/Os:
+ * child 0: 256 blocks
+ * child 1: 128 blocks
+ */
+ prepare_for_test(&ns, &ctrlr, &qpair, 512, 8, 128 * 1024, 0, false);
+ ns.flags |= SPDK_NVME_NS_DPS_PI_SUPPORTED;
+
+ rc = spdk_nvme_ns_cmd_compare_with_md(&ns, &qpair, buffer, metadata, 0x1000, 384,
+ NULL, NULL, SPDK_NVME_IO_FLAGS_PRACT, 0, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 2);
+ child0 = TAILQ_FIRST(&g_request->children);
+
+ SPDK_CU_ASSERT_FATAL(child0 != NULL);
+ CU_ASSERT(child0->payload_offset == 0);
+ CU_ASSERT(child0->payload_size == 256 * 512);
+ CU_ASSERT(child0->md_offset == 0);
+ child1 = TAILQ_NEXT(child0, child_tailq);
+
+ SPDK_CU_ASSERT_FATAL(child1 != NULL);
+ CU_ASSERT(child1->payload_offset == 256 * 512);
+ CU_ASSERT(child1->payload_size == 128 * 512);
+ CU_ASSERT(child1->md_offset == 256 * 8);
+
+ nvme_request_free_children(g_request);
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ free(buffer);
+ free(metadata);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("nvme_ns_cmd", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "split_test", split_test) == NULL
+ || CU_add_test(suite, "split_test2", split_test2) == NULL
+ || CU_add_test(suite, "split_test3", split_test3) == NULL
+ || CU_add_test(suite, "split_test4", split_test4) == NULL
+ || CU_add_test(suite, "nvme_ns_cmd_flush", test_nvme_ns_cmd_flush) == NULL
+ || CU_add_test(suite, "nvme_ns_cmd_dataset_management",
+ test_nvme_ns_cmd_dataset_management) == NULL
+ || CU_add_test(suite, "io_flags", test_io_flags) == NULL
+ || CU_add_test(suite, "nvme_ns_cmd_write_zeroes", test_nvme_ns_cmd_write_zeroes) == NULL
+ || CU_add_test(suite, "nvme_ns_cmd_reservation_register",
+ test_nvme_ns_cmd_reservation_register) == NULL
+ || CU_add_test(suite, "nvme_ns_cmd_reservation_release",
+ test_nvme_ns_cmd_reservation_release) == NULL
+ || CU_add_test(suite, "nvme_ns_cmd_reservation_acquire",
+ test_nvme_ns_cmd_reservation_acquire) == NULL
+ || CU_add_test(suite, "nvme_ns_cmd_reservation_report", test_nvme_ns_cmd_reservation_report) == NULL
+ || CU_add_test(suite, "test_cmd_child_request", test_cmd_child_request) == NULL
+ || CU_add_test(suite, "nvme_ns_cmd_readv", test_nvme_ns_cmd_readv) == NULL
+ || CU_add_test(suite, "nvme_ns_cmd_read_with_md", test_nvme_ns_cmd_read_with_md) == NULL
+ || CU_add_test(suite, "nvme_ns_cmd_writev", test_nvme_ns_cmd_writev) == NULL
+ || CU_add_test(suite, "nvme_ns_cmd_write_with_md", test_nvme_ns_cmd_write_with_md) == NULL
+ || CU_add_test(suite, "nvme_ns_cmd_comparev", test_nvme_ns_cmd_comparev) == NULL
+ || CU_add_test(suite, "nvme_ns_cmd_compare_with_md", test_nvme_ns_cmd_compare_with_md) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ g_spdk_nvme_driver = &_g_nvme_driver;
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/.gitignore
new file mode 100644
index 00000000..8f4f47a1
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/.gitignore
@@ -0,0 +1 @@
+nvme_ns_ocssd_cmd_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/Makefile
new file mode 100644
index 00000000..35fdb83a
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_ns_ocssd_cmd_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/nvme_ns_ocssd_cmd_ut.c b/src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/nvme_ns_ocssd_cmd_ut.c
new file mode 100644
index 00000000..2d13b7a6
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_ns_ocssd_cmd.c/nvme_ns_ocssd_cmd_ut.c
@@ -0,0 +1,677 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "nvme/nvme_ns_ocssd_cmd.c"
+#include "nvme/nvme_ns_cmd.c"
+#include "nvme/nvme.c"
+
+#include "common/lib/test_env.c"
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
+DEFINE_STUB(spdk_nvme_qpair_process_completions, int32_t,
+ (struct spdk_nvme_qpair *qpair,
+ uint32_t max_completions), 0);
+
+static struct nvme_driver _g_nvme_driver = {
+ .lock = PTHREAD_MUTEX_INITIALIZER,
+};
+
+static struct nvme_request *g_request = NULL;
+
+int
+nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
+{
+ g_request = req;
+
+ return 0;
+}
+
+void
+nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
+{
+}
+
+void
+nvme_ctrlr_proc_get_ref(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return;
+}
+
+
+int
+nvme_ctrlr_process_init(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return 0;
+}
+
+void
+nvme_ctrlr_proc_put_ref(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return;
+}
+
+void
+spdk_nvme_ctrlr_get_default_ctrlr_opts(struct spdk_nvme_ctrlr_opts *opts, size_t opts_size)
+{
+ memset(opts, 0, sizeof(*opts));
+}
+
+bool
+spdk_nvme_transport_available(enum spdk_nvme_transport_type trtype)
+{
+ return true;
+}
+
+struct spdk_nvme_ctrlr *nvme_transport_ctrlr_construct(const struct spdk_nvme_transport_id *trid,
+ const struct spdk_nvme_ctrlr_opts *opts,
+ void *devhandle)
+{
+ return NULL;
+}
+
+int
+nvme_ctrlr_get_ref_count(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return 0;
+}
+
+int
+nvme_transport_ctrlr_scan(const struct spdk_nvme_transport_id *trid,
+ void *cb_ctx,
+ spdk_nvme_probe_cb probe_cb,
+ spdk_nvme_remove_cb remove_cb,
+ bool direct_connect)
+{
+ return 0;
+}
+
+uint32_t
+spdk_nvme_ns_get_max_io_xfer_size(struct spdk_nvme_ns *ns)
+{
+ return ns->ctrlr->max_xfer_size;
+}
+
+static void
+prepare_for_test(struct spdk_nvme_ns *ns, struct spdk_nvme_ctrlr *ctrlr,
+ struct spdk_nvme_qpair *qpair,
+ uint32_t sector_size, uint32_t md_size, uint32_t max_xfer_size,
+ uint32_t stripe_size, bool extended_lba)
+{
+ uint32_t num_requests = 32;
+ uint32_t i;
+
+ ctrlr->max_xfer_size = max_xfer_size;
+ /*
+ * Clear the flags field - we especially want to make sure the SGL_SUPPORTED flag is not set
+ * so that we test the SGL splitting path.
+ */
+ ctrlr->flags = 0;
+ ctrlr->min_page_size = PAGE_SIZE;
+ ctrlr->page_size = PAGE_SIZE;
+ memset(&ctrlr->opts, 0, sizeof(ctrlr->opts));
+ memset(ns, 0, sizeof(*ns));
+ ns->ctrlr = ctrlr;
+ ns->sector_size = sector_size;
+ ns->extended_lba_size = sector_size;
+ if (extended_lba) {
+ ns->flags |= SPDK_NVME_NS_EXTENDED_LBA_SUPPORTED;
+ ns->extended_lba_size += md_size;
+ }
+ ns->md_size = md_size;
+ ns->sectors_per_max_io = spdk_nvme_ns_get_max_io_xfer_size(ns) / ns->extended_lba_size;
+ ns->sectors_per_stripe = stripe_size / ns->extended_lba_size;
+
+ memset(qpair, 0, sizeof(*qpair));
+ qpair->ctrlr = ctrlr;
+ qpair->req_buf = calloc(num_requests, sizeof(struct nvme_request));
+ SPDK_CU_ASSERT_FATAL(qpair->req_buf != NULL);
+
+ for (i = 0; i < num_requests; i++) {
+ struct nvme_request *req = qpair->req_buf + i * sizeof(struct nvme_request);
+
+ STAILQ_INSERT_HEAD(&qpair->free_req, req, stailq);
+ }
+
+ g_request = NULL;
+}
+
+static void
+cleanup_after_test(struct spdk_nvme_qpair *qpair)
+{
+ free(qpair->req_buf);
+}
+
+static void
+test_nvme_ocssd_ns_cmd_vector_reset_single_entry(void)
+{
+ const uint32_t max_xfer_size = 0x10000;
+ const uint32_t sector_size = 0x1000;
+
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+
+ int rc = 0;
+
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_xfer_size, 0, false);
+ uint64_t lba_list = 0x12345678;
+ spdk_nvme_ocssd_ns_cmd_vector_reset(&ns, &qpair, &lba_list, 1,
+ NULL, NULL, NULL);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_RESET);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ CU_ASSERT(g_request->cmd.cdw10 == lba_list);
+ CU_ASSERT(g_request->cmd.cdw12 == 0);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ocssd_ns_cmd_vector_reset(void)
+{
+ const uint32_t max_xfer_size = 0x10000;
+ const uint32_t sector_size = 0x1000;
+ const uint32_t vector_size = 0x10;
+
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+
+ int rc = 0;
+
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_xfer_size, 0, false);
+ uint64_t lba_list[vector_size];
+ spdk_nvme_ocssd_ns_cmd_vector_reset(&ns, &qpair, lba_list, vector_size,
+ NULL, NULL, NULL);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_RESET);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ CU_ASSERT(g_request->cmd.cdw12 == vector_size - 1);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ocssd_ns_cmd_vector_read_with_md_single_entry(void)
+{
+ const uint32_t max_xfer_size = 0x10000;
+ const uint32_t sector_size = 0x1000;
+ const uint32_t md_size = 0x80;
+
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+
+ int rc = 0;
+
+ char *buffer = malloc(sector_size);
+ char *metadata = malloc(md_size);
+ uint64_t lba_list = 0x12345678;
+
+ SPDK_CU_ASSERT_FATAL(buffer != NULL);
+ SPDK_CU_ASSERT_FATAL(metadata != NULL);
+
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, md_size, max_xfer_size, 0, false);
+ rc = spdk_nvme_ocssd_ns_cmd_vector_read_with_md(&ns, &qpair, buffer, metadata,
+ &lba_list, 1, NULL, NULL, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->payload.md == metadata);
+ CU_ASSERT(g_request->payload_size == PAGE_SIZE);
+ CU_ASSERT(g_request->payload.contig_or_cb_arg == buffer);
+ CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_READ);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ CU_ASSERT(g_request->cmd.cdw10 == lba_list);
+ CU_ASSERT(g_request->cmd.cdw12 == 0);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ free(buffer);
+ free(metadata);
+}
+
+static void
+test_nvme_ocssd_ns_cmd_vector_read_with_md(void)
+{
+ const uint32_t max_xfer_size = 0x10000;
+ const uint32_t sector_size = 0x1000;
+ const uint32_t md_size = 0x80;
+ const uint32_t vector_size = 0x10;
+
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+
+ int rc = 0;
+
+ char *buffer = malloc(sector_size * vector_size);
+ char *metadata = malloc(md_size * vector_size);
+ uint64_t lba_list[vector_size];
+
+ SPDK_CU_ASSERT_FATAL(buffer != NULL);
+ SPDK_CU_ASSERT_FATAL(metadata != NULL);
+
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, md_size, max_xfer_size, 0, false);
+ rc = spdk_nvme_ocssd_ns_cmd_vector_read_with_md(&ns, &qpair, buffer, metadata,
+ lba_list, vector_size,
+ NULL, NULL, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->payload.md == metadata);
+ CU_ASSERT(g_request->payload_size == max_xfer_size);
+ CU_ASSERT(g_request->payload.contig_or_cb_arg == buffer);
+ CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_READ);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ CU_ASSERT(g_request->cmd.cdw12 == vector_size - 1);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ free(buffer);
+ free(metadata);
+}
+
+static void
+test_nvme_ocssd_ns_cmd_vector_read_single_entry(void)
+{
+ const uint32_t max_xfer_size = 0x10000;
+ const uint32_t sector_size = 0x1000;
+
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+
+ int rc = 0;
+
+ char *buffer = malloc(sector_size);
+ uint64_t lba_list = 0x12345678;
+
+ SPDK_CU_ASSERT_FATAL(buffer != NULL);
+
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_xfer_size, 0, false);
+ rc = spdk_nvme_ocssd_ns_cmd_vector_read(&ns, &qpair, buffer, &lba_list, 1,
+ NULL, NULL, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->payload_size == PAGE_SIZE);
+ CU_ASSERT(g_request->payload.contig_or_cb_arg == buffer);
+ CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_READ);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ CU_ASSERT(g_request->cmd.cdw10 == lba_list);
+ CU_ASSERT(g_request->cmd.cdw12 == 0);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+ free(buffer);
+}
+
+static void
+test_nvme_ocssd_ns_cmd_vector_read(void)
+{
+ const uint32_t max_xfer_size = 0x10000;
+ const uint32_t sector_size = 0x1000;
+ const uint32_t vector_size = 0x10;
+
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+
+ int rc = 0;
+
+ char *buffer = malloc(sector_size * vector_size);
+ uint64_t lba_list[vector_size];
+
+ SPDK_CU_ASSERT_FATAL(buffer != NULL);
+
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_xfer_size, 0, false);
+ rc = spdk_nvme_ocssd_ns_cmd_vector_read(&ns, &qpair, buffer, lba_list, vector_size,
+ NULL, NULL, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->payload_size == max_xfer_size);
+ CU_ASSERT(g_request->payload.contig_or_cb_arg == buffer);
+ CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_READ);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ CU_ASSERT(g_request->cmd.cdw12 == vector_size - 1);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+ free(buffer);
+}
+
+static void
+test_nvme_ocssd_ns_cmd_vector_write_with_md_single_entry(void)
+{
+ const uint32_t max_xfer_size = 0x10000;
+ const uint32_t sector_size = 0x1000;
+ const uint32_t md_size = 0x80;
+
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+
+ int rc = 0;
+
+ char *buffer = malloc(sector_size);
+ char *metadata = malloc(md_size);
+ uint64_t lba_list = 0x12345678;
+
+ SPDK_CU_ASSERT_FATAL(buffer != NULL);
+ SPDK_CU_ASSERT_FATAL(metadata != NULL);
+
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, md_size, max_xfer_size, 0, false);
+ spdk_nvme_ocssd_ns_cmd_vector_write_with_md(&ns, &qpair, buffer, metadata,
+ &lba_list, 1, NULL, NULL, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->payload.md == metadata);
+ CU_ASSERT(g_request->payload_size == PAGE_SIZE);
+ CU_ASSERT(g_request->payload.contig_or_cb_arg == buffer);
+ CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_WRITE);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ CU_ASSERT(g_request->cmd.cdw10 == lba_list);
+ CU_ASSERT(g_request->cmd.cdw12 == 0);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ free(buffer);
+ free(metadata);
+}
+
+
+static void
+test_nvme_ocssd_ns_cmd_vector_write_with_md(void)
+{
+ const uint32_t max_xfer_size = 0x10000;
+ const uint32_t sector_size = 0x1000;
+ const uint32_t md_size = 0x80;
+ const uint32_t vector_size = 0x10;
+
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+
+ int rc = 0;
+
+ char *buffer = malloc(sector_size * vector_size);
+ char *metadata = malloc(md_size * vector_size);
+ uint64_t lba_list[vector_size];
+
+ SPDK_CU_ASSERT_FATAL(buffer != NULL);
+ SPDK_CU_ASSERT_FATAL(metadata != NULL);
+
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, md_size, max_xfer_size, 0, false);
+ spdk_nvme_ocssd_ns_cmd_vector_write_with_md(&ns, &qpair, buffer, metadata,
+ lba_list, vector_size,
+ NULL, NULL, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->payload.md == metadata);
+ CU_ASSERT(g_request->payload_size == max_xfer_size);
+ CU_ASSERT(g_request->payload.contig_or_cb_arg == buffer);
+ CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_WRITE);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ CU_ASSERT(g_request->cmd.cdw12 == vector_size - 1);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ free(buffer);
+ free(metadata);
+}
+
+static void
+test_nvme_ocssd_ns_cmd_vector_write_single_entry(void)
+{
+ const uint32_t max_xfer_size = 0x10000;
+ const uint32_t sector_size = 0x1000;
+
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+
+ int rc = 0;
+
+ char *buffer = malloc(sector_size);
+ uint64_t lba_list = 0x12345678;
+
+ SPDK_CU_ASSERT_FATAL(buffer != NULL);
+
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_xfer_size, 0, false);
+ spdk_nvme_ocssd_ns_cmd_vector_write(&ns, &qpair, buffer,
+ &lba_list, 1, NULL, NULL, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->payload_size == PAGE_SIZE);
+ CU_ASSERT(g_request->payload.contig_or_cb_arg == buffer);
+ CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_WRITE);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ CU_ASSERT(g_request->cmd.cdw10 == lba_list);
+ CU_ASSERT(g_request->cmd.cdw12 == 0);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ free(buffer);
+}
+
+static void
+test_nvme_ocssd_ns_cmd_vector_write(void)
+{
+ const uint32_t max_xfer_size = 0x10000;
+ const uint32_t sector_size = 0x1000;
+ const uint32_t vector_size = 0x10;
+
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+
+ int rc = 0;
+
+ char *buffer = malloc(sector_size * vector_size);
+ uint64_t lba_list[vector_size];
+
+ SPDK_CU_ASSERT_FATAL(buffer != NULL);
+
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_xfer_size, 0, false);
+ spdk_nvme_ocssd_ns_cmd_vector_write(&ns, &qpair, buffer,
+ lba_list, vector_size,
+ NULL, NULL, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+
+ CU_ASSERT(g_request->payload_size == max_xfer_size);
+ CU_ASSERT(g_request->payload.contig_or_cb_arg == buffer);
+ CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_WRITE);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ CU_ASSERT(g_request->cmd.cdw12 == vector_size - 1);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+
+ free(buffer);
+}
+
+static void
+test_nvme_ocssd_ns_cmd_vector_copy_single_entry(void)
+{
+ const uint32_t max_xfer_size = 0x10000;
+ const uint32_t sector_size = 0x1000;
+
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+
+ int rc = 0;
+
+ uint64_t src_lba_list = 0x12345678;
+ uint64_t dst_lba_list = 0x87654321;
+
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_xfer_size, 0, false);
+ spdk_nvme_ocssd_ns_cmd_vector_copy(&ns, &qpair, &dst_lba_list, &src_lba_list, 1,
+ NULL, NULL, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+ CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_COPY);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ CU_ASSERT(g_request->cmd.cdw10 == src_lba_list);
+ CU_ASSERT(g_request->cmd.cdw12 == 0);
+ CU_ASSERT(g_request->cmd.cdw14 == dst_lba_list);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
+static void
+test_nvme_ocssd_ns_cmd_vector_copy(void)
+{
+ const uint32_t max_xfer_size = 0x10000;
+ const uint32_t sector_size = 0x1000;
+ const uint32_t vector_size = 0x10;
+
+ struct spdk_nvme_ns ns;
+ struct spdk_nvme_ctrlr ctrlr;
+ struct spdk_nvme_qpair qpair;
+
+ int rc = 0;
+
+ uint64_t src_lba_list[vector_size];
+ uint64_t dst_lba_list[vector_size];
+
+ prepare_for_test(&ns, &ctrlr, &qpair, sector_size, 0, max_xfer_size, 0, false);
+ spdk_nvme_ocssd_ns_cmd_vector_copy(&ns, &qpair,
+ dst_lba_list, src_lba_list, vector_size,
+ NULL, NULL, 0);
+
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ SPDK_CU_ASSERT_FATAL(g_request != NULL);
+ SPDK_CU_ASSERT_FATAL(g_request->num_children == 0);
+ CU_ASSERT(g_request->cmd.opc == SPDK_OCSSD_OPC_VECTOR_COPY);
+ CU_ASSERT(g_request->cmd.nsid == ns.id);
+ CU_ASSERT(g_request->cmd.cdw12 == vector_size - 1);
+
+ nvme_free_request(g_request);
+ cleanup_after_test(&qpair);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("nvme_ns_cmd", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "nvme_ns_ocssd_cmd_vector_reset", test_nvme_ocssd_ns_cmd_vector_reset) == NULL
+ || CU_add_test(suite, "nvme_ocssd_ns_cmd_vector_reset_single_entry",
+ test_nvme_ocssd_ns_cmd_vector_reset_single_entry) == NULL
+ || CU_add_test(suite, "nvme_ocssd_ns_cmd_vector_read_with_md",
+ test_nvme_ocssd_ns_cmd_vector_read_with_md) == NULL
+ || CU_add_test(suite, "nvme_ocssd_ns_cmd_vector_read_with_md_single_entry",
+ test_nvme_ocssd_ns_cmd_vector_read_with_md_single_entry) == NULL
+ || CU_add_test(suite, "nvme_ocssd_ns_cmd_vector_read", test_nvme_ocssd_ns_cmd_vector_read) == NULL
+ || CU_add_test(suite, "nvme_ocssd_ns_cmd_vector_read_single_entry",
+ test_nvme_ocssd_ns_cmd_vector_read_single_entry) == NULL
+ || CU_add_test(suite, "nvme_ocssd_ns_cmd_vector_write_with_md",
+ test_nvme_ocssd_ns_cmd_vector_write_with_md) == NULL
+ || CU_add_test(suite, "nvme_ocssd_ns_cmd_vector_write_with_md_single_entry",
+ test_nvme_ocssd_ns_cmd_vector_write_with_md_single_entry) == NULL
+ || CU_add_test(suite, "nvme_ocssd_ns_cmd_vector_write", test_nvme_ocssd_ns_cmd_vector_write) == NULL
+ || CU_add_test(suite, "nvme_ocssd_ns_cmd_vector_write_single_entry",
+ test_nvme_ocssd_ns_cmd_vector_write_single_entry) == NULL
+ || CU_add_test(suite, "nvme_ocssd_ns_cmd_vector_copy", test_nvme_ocssd_ns_cmd_vector_copy) == NULL
+ || CU_add_test(suite, "nvme_ocssd_ns_cmd_vector_copy_single_entry",
+ test_nvme_ocssd_ns_cmd_vector_copy_single_entry) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ g_spdk_nvme_driver = &_g_nvme_driver;
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_pcie.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_pcie.c/.gitignore
new file mode 100644
index 00000000..8fc29109
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_pcie.c/.gitignore
@@ -0,0 +1 @@
+nvme_pcie_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_pcie.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_pcie.c/Makefile
new file mode 100644
index 00000000..09032a93
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_pcie.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_pcie_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_pcie.c/nvme_pcie_ut.c b/src/spdk/test/unit/lib/nvme/nvme_pcie.c/nvme_pcie_ut.c
new file mode 100644
index 00000000..2bec5865
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_pcie.c/nvme_pcie_ut.c
@@ -0,0 +1,861 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "common/lib/test_env.c"
+
+#include "nvme/nvme_pcie.c"
+
+pid_t g_spdk_nvme_pid;
+
+DEFINE_STUB(spdk_mem_register, int, (void *vaddr, size_t len), 0);
+DEFINE_STUB(spdk_mem_unregister, int, (void *vaddr, size_t len), 0);
+
+DEFINE_STUB(spdk_nvme_ctrlr_get_process,
+ struct spdk_nvme_ctrlr_process *,
+ (struct spdk_nvme_ctrlr *ctrlr, pid_t pid),
+ NULL);
+
+DEFINE_STUB(spdk_nvme_ctrlr_get_current_process,
+ struct spdk_nvme_ctrlr_process *,
+ (struct spdk_nvme_ctrlr *ctrlr),
+ NULL);
+
+DEFINE_STUB(spdk_nvme_wait_for_completion, int,
+ (struct spdk_nvme_qpair *qpair,
+ struct nvme_completion_poll_status *status), 0);
+
+struct spdk_trace_flag SPDK_LOG_NVME = {
+ .name = "nvme",
+ .enabled = false,
+};
+
+static struct nvme_driver _g_nvme_driver = {
+ .lock = PTHREAD_MUTEX_INITIALIZER,
+};
+struct nvme_driver *g_spdk_nvme_driver = &_g_nvme_driver;
+
+int32_t spdk_nvme_retry_count = 1;
+
+struct nvme_request *g_request = NULL;
+
+extern bool ut_fail_vtophys;
+
+bool fail_next_sge = false;
+
+struct io_request {
+ uint64_t address_offset;
+ bool invalid_addr;
+ bool invalid_second_addr;
+};
+
+void
+nvme_ctrlr_fail(struct spdk_nvme_ctrlr *ctrlr, bool hot_remove)
+{
+ abort();
+}
+
+int
+spdk_uevent_connect(void)
+{
+ abort();
+}
+
+int
+spdk_get_uevent(int fd, struct spdk_uevent *uevent)
+{
+ abort();
+}
+
+struct spdk_pci_id
+spdk_pci_device_get_id(struct spdk_pci_device *dev)
+{
+ abort();
+}
+
+int
+nvme_qpair_init(struct spdk_nvme_qpair *qpair, uint16_t id,
+ struct spdk_nvme_ctrlr *ctrlr,
+ enum spdk_nvme_qprio qprio,
+ uint32_t num_requests)
+{
+ abort();
+}
+
+void
+nvme_qpair_deinit(struct spdk_nvme_qpair *qpair)
+{
+ abort();
+}
+
+int
+spdk_pci_nvme_enumerate(spdk_pci_enum_cb enum_cb, void *enum_ctx)
+{
+ abort();
+}
+
+int
+spdk_pci_nvme_device_attach(spdk_pci_enum_cb enum_cb, void *enum_ctx,
+ struct spdk_pci_addr *pci_address)
+{
+ abort();
+}
+
+void
+spdk_pci_device_detach(struct spdk_pci_device *device)
+{
+ abort();
+}
+
+int
+spdk_pci_device_map_bar(struct spdk_pci_device *dev, uint32_t bar,
+ void **mapped_addr, uint64_t *phys_addr, uint64_t *size)
+{
+ abort();
+}
+
+int
+spdk_pci_device_unmap_bar(struct spdk_pci_device *dev, uint32_t bar, void *addr)
+{
+ abort();
+}
+
+struct spdk_pci_addr
+spdk_pci_device_get_addr(struct spdk_pci_device *dev)
+{
+ abort();
+}
+
+int
+spdk_pci_device_cfg_read32(struct spdk_pci_device *dev, uint32_t *value, uint32_t offset)
+{
+ abort();
+}
+
+int
+spdk_pci_device_cfg_write32(struct spdk_pci_device *dev, uint32_t value, uint32_t offset)
+{
+ abort();
+}
+
+int
+spdk_pci_device_claim(const struct spdk_pci_addr *pci_addr)
+{
+ abort();
+}
+
+int
+nvme_ctrlr_construct(struct spdk_nvme_ctrlr *ctrlr)
+{
+ abort();
+}
+
+void
+nvme_ctrlr_destruct_finish(struct spdk_nvme_ctrlr *ctrlr)
+{
+ abort();
+}
+
+void
+nvme_ctrlr_destruct(struct spdk_nvme_ctrlr *ctrlr)
+{
+ abort();
+}
+
+int
+nvme_ctrlr_add_process(struct spdk_nvme_ctrlr *ctrlr, void *devhandle)
+{
+ abort();
+}
+
+void
+nvme_ctrlr_free_processes(struct spdk_nvme_ctrlr *ctrlr)
+{
+ abort();
+}
+
+struct spdk_pci_device *
+nvme_ctrlr_proc_get_devhandle(struct spdk_nvme_ctrlr *ctrlr)
+{
+ abort();
+}
+
+int
+nvme_ctrlr_probe(const struct spdk_nvme_transport_id *trid, void *devhandle,
+ spdk_nvme_probe_cb probe_cb, void *cb_ctx)
+{
+ abort();
+}
+
+int
+nvme_ctrlr_get_cap(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_cap_register *cap)
+{
+ abort();
+}
+
+int
+nvme_ctrlr_get_vs(struct spdk_nvme_ctrlr *ctrlr, union spdk_nvme_vs_register *vs)
+{
+ abort();
+}
+
+void
+nvme_ctrlr_init_cap(struct spdk_nvme_ctrlr *ctrlr, const union spdk_nvme_cap_register *cap,
+ const union spdk_nvme_vs_register *vs)
+{
+ abort();
+}
+
+uint64_t
+nvme_get_quirks(const struct spdk_pci_id *id)
+{
+ abort();
+}
+
+bool
+nvme_completion_is_retry(const struct spdk_nvme_cpl *cpl)
+{
+ abort();
+}
+
+void
+nvme_qpair_print_command(struct spdk_nvme_qpair *qpair, struct spdk_nvme_cmd *cmd)
+{
+ abort();
+}
+
+void
+nvme_qpair_print_completion(struct spdk_nvme_qpair *qpair, struct spdk_nvme_cpl *cpl)
+{
+ abort();
+}
+
+int
+nvme_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
+{
+ abort();
+}
+
+int
+nvme_ctrlr_submit_admin_request(struct spdk_nvme_ctrlr *ctrlr,
+ struct nvme_request *req)
+{
+ abort();
+}
+
+void
+nvme_completion_poll_cb(void *arg, const struct spdk_nvme_cpl *cpl)
+{
+ abort();
+}
+
+int32_t
+spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
+{
+ abort();
+}
+
+void
+nvme_qpair_enable(struct spdk_nvme_qpair *qpair)
+{
+ abort();
+}
+
+int
+nvme_request_check_timeout(struct nvme_request *req, uint16_t cid,
+ struct spdk_nvme_ctrlr_process *active_proc,
+ uint64_t now_tick)
+{
+ abort();
+}
+
+struct spdk_nvme_ctrlr *
+spdk_nvme_get_ctrlr_by_trid_unsafe(const struct spdk_nvme_transport_id *trid)
+{
+ return NULL;
+}
+
+union spdk_nvme_csts_register spdk_nvme_ctrlr_get_regs_csts(struct spdk_nvme_ctrlr *ctrlr)
+{
+ union spdk_nvme_csts_register csts = {};
+
+ return csts;
+}
+
+#if 0 /* TODO: update PCIe-specific unit test */
+static void
+nvme_request_reset_sgl(void *cb_arg, uint32_t sgl_offset)
+{
+ struct io_request *req = (struct io_request *)cb_arg;
+
+ req->address_offset = 0;
+ req->invalid_addr = false;
+ req->invalid_second_addr = false;
+ switch (sgl_offset) {
+ case 0:
+ req->invalid_addr = false;
+ break;
+ case 1:
+ req->invalid_addr = true;
+ break;
+ case 2:
+ req->invalid_addr = false;
+ req->invalid_second_addr = true;
+ break;
+ default:
+ break;
+ }
+ return;
+}
+
+static int
+nvme_request_next_sge(void *cb_arg, void **address, uint32_t *length)
+{
+ struct io_request *req = (struct io_request *)cb_arg;
+
+ if (req->address_offset == 0) {
+ if (req->invalid_addr) {
+ *address = (void *)7;
+ } else {
+ *address = (void *)(4096 * req->address_offset);
+ }
+ } else if (req->address_offset == 1) {
+ if (req->invalid_second_addr) {
+ *address = (void *)7;
+ } else {
+ *address = (void *)(4096 * req->address_offset);
+ }
+ } else {
+ *address = (void *)(4096 * req->address_offset);
+ }
+
+ req->address_offset += 1;
+ *length = 4096;
+
+ if (fail_next_sge) {
+ return - 1;
+ } else {
+ return 0;
+ }
+
+}
+
+static void
+prepare_submit_request_test(struct spdk_nvme_qpair *qpair,
+ struct spdk_nvme_ctrlr *ctrlr)
+{
+ memset(ctrlr, 0, sizeof(*ctrlr));
+ ctrlr->free_io_qids = NULL;
+ TAILQ_INIT(&ctrlr->active_io_qpairs);
+ TAILQ_INIT(&ctrlr->active_procs);
+ nvme_qpair_init(qpair, 1, ctrlr, 0);
+
+ ut_fail_vtophys = false;
+}
+
+static void
+cleanup_submit_request_test(struct spdk_nvme_qpair *qpair)
+{
+}
+
+static void
+ut_insert_cq_entry(struct spdk_nvme_qpair *qpair, uint32_t slot)
+{
+ struct nvme_request *req;
+ struct nvme_tracker *tr;
+ struct spdk_nvme_cpl *cpl;
+
+ req = calloc(1, sizeof(*req));
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+ memset(req, 0, sizeof(*req));
+
+ tr = TAILQ_FIRST(&qpair->free_tr);
+ TAILQ_REMOVE(&qpair->free_tr, tr, tq_list); /* remove tr from free_tr */
+ TAILQ_INSERT_HEAD(&qpair->outstanding_tr, tr, tq_list);
+ req->cmd.cid = tr->cid;
+ tr->req = req;
+ qpair->tr[tr->cid].active = true;
+
+ cpl = &qpair->cpl[slot];
+ cpl->status.p = qpair->phase;
+ cpl->cid = tr->cid;
+}
+
+static void
+expected_success_callback(void *arg, const struct spdk_nvme_cpl *cpl)
+{
+ CU_ASSERT(!spdk_nvme_cpl_is_error(cpl));
+}
+
+static void
+expected_failure_callback(void *arg, const struct spdk_nvme_cpl *cpl)
+{
+ CU_ASSERT(spdk_nvme_cpl_is_error(cpl));
+}
+
+static void
+test4(void)
+{
+ struct spdk_nvme_qpair qpair = {};
+ struct nvme_request *req;
+ struct spdk_nvme_ctrlr ctrlr = {};
+ char payload[4096];
+
+ prepare_submit_request_test(&qpair, &ctrlr);
+
+ req = nvme_allocate_request_contig(payload, sizeof(payload), expected_failure_callback, NULL);
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+
+ /* Force vtophys to return a failure. This should
+ * result in the nvme_qpair manually failing
+ * the request with error status to signify
+ * a bad payload buffer.
+ */
+ ut_fail_vtophys = true;
+
+ CU_ASSERT(qpair.sq_tail == 0);
+
+ CU_ASSERT(nvme_qpair_submit_request(&qpair, req) != 0);
+
+ CU_ASSERT(qpair.sq_tail == 0);
+
+ cleanup_submit_request_test(&qpair);
+}
+
+static void
+test_sgl_req(void)
+{
+ struct spdk_nvme_qpair qpair = {};
+ struct nvme_request *req;
+ struct spdk_nvme_ctrlr ctrlr = {};
+ struct nvme_payload payload = {};
+ struct nvme_tracker *sgl_tr = NULL;
+ uint64_t i;
+ struct io_request io_req = {};
+
+ payload = NVME_PAYLOAD_SGL(nvme_request_reset_sgl, nvme_request_next_sge, &io_req, NULL);
+
+ prepare_submit_request_test(&qpair, &ctrlr);
+ req = nvme_allocate_request(&payload, 0x1000, NULL, &io_req);
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+ req->cmd.opc = SPDK_NVME_OPC_WRITE;
+ req->cmd.cdw10 = 10000;
+ req->cmd.cdw12 = 7 | 0;
+ req->payload_offset = 1;
+
+ CU_ASSERT(nvme_qpair_submit_request(&qpair, req) != 0);
+ CU_ASSERT(qpair.sq_tail == 0);
+ cleanup_submit_request_test(&qpair);
+
+ prepare_submit_request_test(&qpair, &ctrlr);
+ req = nvme_allocate_request(&payload, 0x1000, NULL, &io_req);
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+ req->cmd.opc = SPDK_NVME_OPC_WRITE;
+ req->cmd.cdw10 = 10000;
+ req->cmd.cdw12 = 7 | 0;
+ spdk_nvme_retry_count = 1;
+ fail_next_sge = true;
+
+ CU_ASSERT(nvme_qpair_submit_request(&qpair, req) != 0);
+ CU_ASSERT(qpair.sq_tail == 0);
+ cleanup_submit_request_test(&qpair);
+
+ fail_next_sge = false;
+
+ prepare_submit_request_test(&qpair, &ctrlr);
+ req = nvme_allocate_request(&payload, 2 * 0x1000, NULL, &io_req);
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+ req->cmd.opc = SPDK_NVME_OPC_WRITE;
+ req->cmd.cdw10 = 10000;
+ req->cmd.cdw12 = 15 | 0;
+ req->payload_offset = 2;
+
+ CU_ASSERT(nvme_qpair_submit_request(&qpair, req) != 0);
+ CU_ASSERT(qpair.sq_tail == 0);
+ cleanup_submit_request_test(&qpair);
+
+ prepare_submit_request_test(&qpair, &ctrlr);
+ req = nvme_allocate_request(&payload, (NVME_MAX_PRP_LIST_ENTRIES + 1) * 0x1000, NULL, &io_req);
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+ req->cmd.opc = SPDK_NVME_OPC_WRITE;
+ req->cmd.cdw10 = 10000;
+ req->cmd.cdw12 = 4095 | 0;
+
+ CU_ASSERT(nvme_qpair_submit_request(&qpair, req) == 0);
+
+ CU_ASSERT(req->cmd.dptr.prp.prp1 == 0);
+ CU_ASSERT(qpair.sq_tail == 1);
+ sgl_tr = TAILQ_FIRST(&qpair.outstanding_tr);
+ if (sgl_tr != NULL) {
+ for (i = 0; i < NVME_MAX_PRP_LIST_ENTRIES; i++) {
+ CU_ASSERT(sgl_tr->u.prp[i] == (0x1000 * (i + 1)));
+ }
+
+ TAILQ_REMOVE(&qpair.outstanding_tr, sgl_tr, tq_list);
+ }
+ cleanup_submit_request_test(&qpair);
+ nvme_free_request(req);
+}
+
+static void
+test_hw_sgl_req(void)
+{
+ struct spdk_nvme_qpair qpair = {};
+ struct nvme_request *req;
+ struct spdk_nvme_ctrlr ctrlr = {};
+ struct nvme_payload payload = {};
+ struct nvme_tracker *sgl_tr = NULL;
+ uint64_t i;
+ struct io_request io_req = {};
+
+ payload = NVME_PAYLOAD_SGL(nvme_request_reset_sgl, nvme_request_next_sge, &io_req, NULL);
+
+ prepare_submit_request_test(&qpair, &ctrlr);
+ req = nvme_allocate_request(&payload, 0x1000, NULL, &io_req);
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+ req->cmd.opc = SPDK_NVME_OPC_WRITE;
+ req->cmd.cdw10 = 10000;
+ req->cmd.cdw12 = 7 | 0;
+ req->payload_offset = 0;
+ ctrlr.flags |= SPDK_NVME_CTRLR_SGL_SUPPORTED;
+
+ nvme_qpair_submit_request(&qpair, req);
+
+ sgl_tr = TAILQ_FIRST(&qpair.outstanding_tr);
+ CU_ASSERT(sgl_tr != NULL);
+ CU_ASSERT(sgl_tr->u.sgl[0].generic.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
+ CU_ASSERT(sgl_tr->u.sgl[0].generic.subtype == 0);
+ CU_ASSERT(sgl_tr->u.sgl[0].unkeyed.length == 4096);
+ CU_ASSERT(sgl_tr->u.sgl[0].address == 0);
+ CU_ASSERT(req->cmd.dptr.sgl1.generic.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
+ TAILQ_REMOVE(&qpair.outstanding_tr, sgl_tr, tq_list);
+ cleanup_submit_request_test(&qpair);
+ nvme_free_request(req);
+
+ prepare_submit_request_test(&qpair, &ctrlr);
+ req = nvme_allocate_request(&payload, NVME_MAX_SGL_DESCRIPTORS * 0x1000, NULL, &io_req);
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+ req->cmd.opc = SPDK_NVME_OPC_WRITE;
+ req->cmd.cdw10 = 10000;
+ req->cmd.cdw12 = 2023 | 0;
+ req->payload_offset = 0;
+ ctrlr.flags |= SPDK_NVME_CTRLR_SGL_SUPPORTED;
+
+ nvme_qpair_submit_request(&qpair, req);
+
+ sgl_tr = TAILQ_FIRST(&qpair.outstanding_tr);
+ CU_ASSERT(sgl_tr != NULL);
+ for (i = 0; i < NVME_MAX_SGL_DESCRIPTORS; i++) {
+ CU_ASSERT(sgl_tr->u.sgl[i].generic.type == SPDK_NVME_SGL_TYPE_DATA_BLOCK);
+ CU_ASSERT(sgl_tr->u.sgl[i].generic.subtype == 0);
+ CU_ASSERT(sgl_tr->u.sgl[i].unkeyed.length == 4096);
+ CU_ASSERT(sgl_tr->u.sgl[i].address == i * 4096);
+ }
+ CU_ASSERT(req->cmd.dptr.sgl1.generic.type == SPDK_NVME_SGL_TYPE_LAST_SEGMENT);
+ TAILQ_REMOVE(&qpair.outstanding_tr, sgl_tr, tq_list);
+ cleanup_submit_request_test(&qpair);
+ nvme_free_request(req);
+}
+
+static void test_nvme_qpair_fail(void)
+{
+ struct spdk_nvme_qpair qpair = {};
+ struct nvme_request *req = NULL;
+ struct spdk_nvme_ctrlr ctrlr = {};
+ struct nvme_tracker *tr_temp;
+
+ prepare_submit_request_test(&qpair, &ctrlr);
+
+ tr_temp = TAILQ_FIRST(&qpair.free_tr);
+ SPDK_CU_ASSERT_FATAL(tr_temp != NULL);
+ TAILQ_REMOVE(&qpair.free_tr, tr_temp, tq_list);
+ tr_temp->req = nvme_allocate_request_null(expected_failure_callback, NULL);
+ SPDK_CU_ASSERT_FATAL(tr_temp->req != NULL);
+ tr_temp->req->cmd.cid = tr_temp->cid;
+
+ TAILQ_INSERT_HEAD(&qpair.outstanding_tr, tr_temp, tq_list);
+ nvme_qpair_fail(&qpair);
+ CU_ASSERT_TRUE(TAILQ_EMPTY(&qpair.outstanding_tr));
+
+ req = nvme_allocate_request_null(expected_failure_callback, NULL);
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+
+ STAILQ_INSERT_HEAD(&qpair.queued_req, req, stailq);
+ nvme_qpair_fail(&qpair);
+ CU_ASSERT_TRUE(STAILQ_EMPTY(&qpair.queued_req));
+
+ cleanup_submit_request_test(&qpair);
+}
+
+static void
+test_nvme_qpair_process_completions_limit(void)
+{
+ struct spdk_nvme_qpair qpair = {};
+ struct spdk_nvme_ctrlr ctrlr = {};
+
+ prepare_submit_request_test(&qpair, &ctrlr);
+ qpair.is_enabled = true;
+
+ /* Insert 4 entries into the completion queue */
+ CU_ASSERT(qpair.cq_head == 0);
+ ut_insert_cq_entry(&qpair, 0);
+ ut_insert_cq_entry(&qpair, 1);
+ ut_insert_cq_entry(&qpair, 2);
+ ut_insert_cq_entry(&qpair, 3);
+
+ /* This should only process 2 completions, and 2 should be left in the queue */
+ spdk_nvme_qpair_process_completions(&qpair, 2);
+ CU_ASSERT(qpair.cq_head == 2);
+
+ /* This should only process 1 completion, and 1 should be left in the queue */
+ spdk_nvme_qpair_process_completions(&qpair, 1);
+ CU_ASSERT(qpair.cq_head == 3);
+
+ /* This should process the remaining completion */
+ spdk_nvme_qpair_process_completions(&qpair, 5);
+ CU_ASSERT(qpair.cq_head == 4);
+
+ cleanup_submit_request_test(&qpair);
+}
+
+static void test_nvme_qpair_destroy(void)
+{
+ struct spdk_nvme_qpair qpair = {};
+ struct spdk_nvme_ctrlr ctrlr = {};
+ struct nvme_tracker *tr_temp;
+
+ memset(&ctrlr, 0, sizeof(ctrlr));
+ TAILQ_INIT(&ctrlr.free_io_qpairs);
+ TAILQ_INIT(&ctrlr.active_io_qpairs);
+ TAILQ_INIT(&ctrlr.active_procs);
+
+ nvme_qpair_init(&qpair, 1, 128, &ctrlr);
+ nvme_qpair_destroy(&qpair);
+
+
+ nvme_qpair_init(&qpair, 0, 128, &ctrlr);
+ tr_temp = TAILQ_FIRST(&qpair.free_tr);
+ SPDK_CU_ASSERT_FATAL(tr_temp != NULL);
+ TAILQ_REMOVE(&qpair.free_tr, tr_temp, tq_list);
+ tr_temp->req = nvme_allocate_request_null(expected_failure_callback, NULL);
+ SPDK_CU_ASSERT_FATAL(tr_temp->req != NULL);
+
+ tr_temp->req->cmd.opc = SPDK_NVME_OPC_ASYNC_EVENT_REQUEST;
+ tr_temp->req->cmd.cid = tr_temp->cid;
+ TAILQ_INSERT_HEAD(&qpair.outstanding_tr, tr_temp, tq_list);
+
+ nvme_qpair_destroy(&qpair);
+ CU_ASSERT(TAILQ_EMPTY(&qpair.outstanding_tr));
+}
+#endif
+
+static void
+prp_list_prep(struct nvme_tracker *tr, struct nvme_request *req, uint32_t *prp_index)
+{
+ memset(req, 0, sizeof(*req));
+ memset(tr, 0, sizeof(*tr));
+ tr->req = req;
+ tr->prp_sgl_bus_addr = 0xDEADBEEF;
+ *prp_index = 0;
+}
+
+static void
+test_prp_list_append(void)
+{
+ struct nvme_request req;
+ struct nvme_tracker tr;
+ uint32_t prp_index;
+
+ /* Non-DWORD-aligned buffer (invalid) */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100001, 0x1000, 0x1000) == -EINVAL);
+
+ /* 512-byte buffer, 4K aligned */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000, 0x200, 0x1000) == 0);
+ CU_ASSERT(prp_index == 1);
+ CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
+
+ /* 512-byte buffer, non-4K-aligned */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x108000, 0x200, 0x1000) == 0);
+ CU_ASSERT(prp_index == 1);
+ CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x108000);
+
+ /* 4K buffer, 4K aligned */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000, 0x1000, 0x1000) == 0);
+ CU_ASSERT(prp_index == 1);
+ CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
+
+ /* 4K buffer, non-4K aligned */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800, 0x1000, 0x1000) == 0);
+ CU_ASSERT(prp_index == 2);
+ CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
+ CU_ASSERT(req.cmd.dptr.prp.prp2 == 0x101000);
+
+ /* 8K buffer, 4K aligned */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000, 0x2000, 0x1000) == 0);
+ CU_ASSERT(prp_index == 2);
+ CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
+ CU_ASSERT(req.cmd.dptr.prp.prp2 == 0x101000);
+
+ /* 8K buffer, non-4K aligned */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800, 0x2000, 0x1000) == 0);
+ CU_ASSERT(prp_index == 3);
+ CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
+ CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
+ CU_ASSERT(tr.u.prp[0] == 0x101000);
+ CU_ASSERT(tr.u.prp[1] == 0x102000);
+
+ /* 12K buffer, 4K aligned */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000, 0x3000, 0x1000) == 0);
+ CU_ASSERT(prp_index == 3);
+ CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
+ CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
+ CU_ASSERT(tr.u.prp[0] == 0x101000);
+ CU_ASSERT(tr.u.prp[1] == 0x102000);
+
+ /* 12K buffer, non-4K aligned */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800, 0x3000, 0x1000) == 0);
+ CU_ASSERT(prp_index == 4);
+ CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
+ CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
+ CU_ASSERT(tr.u.prp[0] == 0x101000);
+ CU_ASSERT(tr.u.prp[1] == 0x102000);
+ CU_ASSERT(tr.u.prp[2] == 0x103000);
+
+ /* Two 4K buffers, both 4K aligned */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000, 0x1000, 0x1000) == 0);
+ CU_ASSERT(prp_index == 1);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x900000, 0x1000, 0x1000) == 0);
+ CU_ASSERT(prp_index == 2);
+ CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100000);
+ CU_ASSERT(req.cmd.dptr.prp.prp2 == 0x900000);
+
+ /* Two 4K buffers, first non-4K aligned, second 4K aligned */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800, 0x1000, 0x1000) == 0);
+ CU_ASSERT(prp_index == 2);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x900000, 0x1000, 0x1000) == 0);
+ CU_ASSERT(prp_index == 3);
+ CU_ASSERT(req.cmd.dptr.prp.prp1 == 0x100800);
+ CU_ASSERT(req.cmd.dptr.prp.prp2 == tr.prp_sgl_bus_addr);
+ CU_ASSERT(tr.u.prp[0] == 0x101000);
+ CU_ASSERT(tr.u.prp[1] == 0x900000);
+
+ /* Two 4K buffers, both non-4K aligned (invalid) */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800, 0x1000, 0x1000) == 0);
+ CU_ASSERT(prp_index == 2);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x900800, 0x1000, 0x1000) == -EINVAL);
+ CU_ASSERT(prp_index == 2);
+
+ /* 4K buffer, 4K aligned, but vtophys fails */
+ MOCK_SET(spdk_vtophys, SPDK_VTOPHYS_ERROR);
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000, 0x1000, 0x1000) == -EINVAL);
+ MOCK_CLEAR(spdk_vtophys);
+
+ /* Largest aligned buffer that can be described in NVME_MAX_PRP_LIST_ENTRIES (plus PRP1) */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000,
+ (NVME_MAX_PRP_LIST_ENTRIES + 1) * 0x1000, 0x1000) == 0);
+ CU_ASSERT(prp_index == NVME_MAX_PRP_LIST_ENTRIES + 1);
+
+ /* Largest non-4K-aligned buffer that can be described in NVME_MAX_PRP_LIST_ENTRIES (plus PRP1) */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800,
+ NVME_MAX_PRP_LIST_ENTRIES * 0x1000, 0x1000) == 0);
+ CU_ASSERT(prp_index == NVME_MAX_PRP_LIST_ENTRIES + 1);
+
+ /* Buffer too large to be described in NVME_MAX_PRP_LIST_ENTRIES */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100000,
+ (NVME_MAX_PRP_LIST_ENTRIES + 2) * 0x1000, 0x1000) == -EINVAL);
+
+ /* Non-4K-aligned buffer too large to be described in NVME_MAX_PRP_LIST_ENTRIES */
+ prp_list_prep(&tr, &req, &prp_index);
+ CU_ASSERT(nvme_pcie_prp_list_append(&tr, &prp_index, (void *)0x100800,
+ (NVME_MAX_PRP_LIST_ENTRIES + 1) * 0x1000, 0x1000) == -EINVAL);
+}
+
+static void test_shadow_doorbell_update(void)
+{
+ bool ret;
+
+ /* nvme_pcie_qpair_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old) */
+ ret = nvme_pcie_qpair_need_event(10, 15, 14);
+ CU_ASSERT(ret == false);
+
+ ret = nvme_pcie_qpair_need_event(14, 15, 14);
+ CU_ASSERT(ret == true);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("nvme_pcie", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (CU_add_test(suite, "prp_list_append", test_prp_list_append) == NULL
+ || CU_add_test(suite, "shadow_doorbell_update",
+ test_shadow_doorbell_update) == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_qpair.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_qpair.c/.gitignore
new file mode 100644
index 00000000..1bb18e99
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_qpair.c/.gitignore
@@ -0,0 +1 @@
+nvme_qpair_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_qpair.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_qpair.c/Makefile
new file mode 100644
index 00000000..d7762a38
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_qpair.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_qpair_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_qpair.c/nvme_qpair_ut.c b/src/spdk/test/unit/lib/nvme/nvme_qpair.c/nvme_qpair_ut.c
new file mode 100644
index 00000000..11fea8c7
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_qpair.c/nvme_qpair_ut.c
@@ -0,0 +1,418 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "common/lib/test_env.c"
+
+pid_t g_spdk_nvme_pid;
+
+bool trace_flag = false;
+#define SPDK_LOG_NVME trace_flag
+
+#include "nvme/nvme_qpair.c"
+
+struct nvme_driver _g_nvme_driver = {
+ .lock = PTHREAD_MUTEX_INITIALIZER,
+};
+
+void
+nvme_request_remove_child(struct nvme_request *parent,
+ struct nvme_request *child)
+{
+ parent->num_children--;
+ TAILQ_REMOVE(&parent->children, child, child_tailq);
+}
+
+int
+nvme_transport_qpair_enable(struct spdk_nvme_qpair *qpair)
+{
+ return 0;
+}
+
+int
+nvme_transport_qpair_disable(struct spdk_nvme_qpair *qpair)
+{
+ return 0;
+}
+
+int
+nvme_transport_qpair_fail(struct spdk_nvme_qpair *qpair)
+{
+ return 0;
+}
+
+int
+nvme_transport_qpair_submit_request(struct spdk_nvme_qpair *qpair, struct nvme_request *req)
+{
+ // TODO
+ return 0;
+}
+
+int32_t
+nvme_transport_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
+{
+ // TODO
+ return 0;
+}
+
+int
+spdk_nvme_ctrlr_free_io_qpair(struct spdk_nvme_qpair *qpair)
+{
+ return 0;
+}
+
+static void
+prepare_submit_request_test(struct spdk_nvme_qpair *qpair,
+ struct spdk_nvme_ctrlr *ctrlr)
+{
+ memset(ctrlr, 0, sizeof(*ctrlr));
+ ctrlr->free_io_qids = NULL;
+ TAILQ_INIT(&ctrlr->active_io_qpairs);
+ TAILQ_INIT(&ctrlr->active_procs);
+ nvme_qpair_init(qpair, 1, ctrlr, 0, 32);
+}
+
+static void
+cleanup_submit_request_test(struct spdk_nvme_qpair *qpair)
+{
+ free(qpair->req_buf);
+}
+
+static void
+expected_success_callback(void *arg, const struct spdk_nvme_cpl *cpl)
+{
+ CU_ASSERT(!spdk_nvme_cpl_is_error(cpl));
+}
+
+static void
+expected_failure_callback(void *arg, const struct spdk_nvme_cpl *cpl)
+{
+ CU_ASSERT(spdk_nvme_cpl_is_error(cpl));
+}
+
+static void
+test3(void)
+{
+ struct spdk_nvme_qpair qpair = {};
+ struct nvme_request *req;
+ struct spdk_nvme_ctrlr ctrlr = {};
+
+ prepare_submit_request_test(&qpair, &ctrlr);
+
+ req = nvme_allocate_request_null(&qpair, expected_success_callback, NULL);
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+
+ CU_ASSERT(nvme_qpair_submit_request(&qpair, req) == 0);
+
+ nvme_free_request(req);
+
+ cleanup_submit_request_test(&qpair);
+}
+
+static void
+test_ctrlr_failed(void)
+{
+ struct spdk_nvme_qpair qpair = {};
+ struct nvme_request *req;
+ struct spdk_nvme_ctrlr ctrlr = {};
+ char payload[4096];
+
+ prepare_submit_request_test(&qpair, &ctrlr);
+
+ req = nvme_allocate_request_contig(&qpair, payload, sizeof(payload), expected_failure_callback,
+ NULL);
+ SPDK_CU_ASSERT_FATAL(req != NULL);
+
+ /* Set the controller to failed.
+ * Set the controller to resetting so that the qpair won't get re-enabled.
+ */
+ ctrlr.is_failed = true;
+ ctrlr.is_resetting = true;
+
+ CU_ASSERT(nvme_qpair_submit_request(&qpair, req) != 0);
+
+ cleanup_submit_request_test(&qpair);
+}
+
+static void struct_packing(void)
+{
+ /* ctrlr is the first field in nvme_qpair after the fields
+ * that are used in the I/O path. Make sure the I/O path fields
+ * all fit into two cache lines.
+ */
+ CU_ASSERT(offsetof(struct spdk_nvme_qpair, ctrlr) <= 128);
+}
+
+static void test_nvme_qpair_process_completions(void)
+{
+ struct spdk_nvme_qpair qpair = {};
+ struct spdk_nvme_ctrlr ctrlr = {};
+
+ prepare_submit_request_test(&qpair, &ctrlr);
+ qpair.ctrlr->is_resetting = true;
+
+ spdk_nvme_qpair_process_completions(&qpair, 0);
+ cleanup_submit_request_test(&qpair);
+}
+
+static void test_nvme_completion_is_retry(void)
+{
+ struct spdk_nvme_cpl cpl = {};
+
+ cpl.status.sct = SPDK_NVME_SCT_GENERIC;
+ cpl.status.sc = SPDK_NVME_SC_NAMESPACE_NOT_READY;
+ cpl.status.dnr = 0;
+ CU_ASSERT_TRUE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_FORMAT_IN_PROGRESS;
+ cpl.status.dnr = 1;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+ cpl.status.dnr = 0;
+ CU_ASSERT_TRUE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_INVALID_OPCODE;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_INVALID_FIELD;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_COMMAND_ID_CONFLICT;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_DATA_TRANSFER_ERROR;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_ABORTED_POWER_LOSS;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_ABORTED_BY_REQUEST;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_ABORTED_FAILED_FUSED;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_ABORTED_MISSING_FUSED;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_INVALID_SGL_SEG_DESCRIPTOR;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_INVALID_NUM_SGL_DESCIRPTORS;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_METADATA_SGL_LENGTH_INVALID;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_SGL_DESCRIPTOR_TYPE_INVALID;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_INVALID_CONTROLLER_MEM_BUF;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_INVALID_PRP_OFFSET;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_ATOMIC_WRITE_UNIT_EXCEEDED;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_CAPACITY_EXCEEDED;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = SPDK_NVME_SC_RESERVATION_CONFLICT;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sc = 0x70;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sct = SPDK_NVME_SCT_MEDIA_ERROR;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sct = SPDK_NVME_SCT_PATH;
+ cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
+ cpl.status.dnr = 0;
+ CU_ASSERT_TRUE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sct = SPDK_NVME_SCT_PATH;
+ cpl.status.sc = SPDK_NVME_SC_INTERNAL_PATH_ERROR;
+ cpl.status.dnr = 1;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sct = SPDK_NVME_SCT_VENDOR_SPECIFIC;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+
+ cpl.status.sct = 0x4;
+ CU_ASSERT_FALSE(nvme_completion_is_retry(&cpl));
+}
+
+#ifdef DEBUG
+static void
+test_get_status_string(void)
+{
+ const char *status_string;
+
+ status_string = get_status_string(SPDK_NVME_SCT_GENERIC, SPDK_NVME_SC_SUCCESS);
+ CU_ASSERT(strcmp(status_string, "SUCCESS") == 0);
+
+ status_string = get_status_string(SPDK_NVME_SCT_COMMAND_SPECIFIC,
+ SPDK_NVME_SC_COMPLETION_QUEUE_INVALID);
+ CU_ASSERT(strcmp(status_string, "INVALID COMPLETION QUEUE") == 0);
+
+ status_string = get_status_string(SPDK_NVME_SCT_MEDIA_ERROR, SPDK_NVME_SC_UNRECOVERED_READ_ERROR);
+ CU_ASSERT(strcmp(status_string, "UNRECOVERED READ ERROR") == 0);
+
+ status_string = get_status_string(SPDK_NVME_SCT_VENDOR_SPECIFIC, 0);
+ CU_ASSERT(strcmp(status_string, "VENDOR SPECIFIC") == 0);
+
+ status_string = get_status_string(100, 0);
+ CU_ASSERT(strcmp(status_string, "RESERVED") == 0);
+}
+#endif
+
+static void
+test_nvme_qpair_add_cmd_error_injection(void)
+{
+ struct spdk_nvme_qpair qpair = {};
+ struct spdk_nvme_ctrlr ctrlr = {};
+ int rc;
+
+ prepare_submit_request_test(&qpair, &ctrlr);
+ ctrlr.adminq = &qpair;
+
+ /* Admin error injection at submission path */
+ rc = spdk_nvme_qpair_add_cmd_error_injection(&ctrlr, NULL,
+ SPDK_NVME_OPC_GET_FEATURES, true, 5000, 1,
+ SPDK_NVME_SCT_GENERIC, SPDK_NVME_SC_INVALID_FIELD);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(!TAILQ_EMPTY(&qpair.err_cmd_head));
+
+ /* Remove cmd error injection */
+ spdk_nvme_qpair_remove_cmd_error_injection(&ctrlr, NULL, SPDK_NVME_OPC_GET_FEATURES);
+
+ CU_ASSERT(TAILQ_EMPTY(&qpair.err_cmd_head));
+
+ /* IO error injection at completion path */
+ rc = spdk_nvme_qpair_add_cmd_error_injection(&ctrlr, &qpair,
+ SPDK_NVME_OPC_READ, false, 0, 1,
+ SPDK_NVME_SCT_MEDIA_ERROR, SPDK_NVME_SC_UNRECOVERED_READ_ERROR);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(!TAILQ_EMPTY(&qpair.err_cmd_head));
+
+ /* Provide the same opc, and check whether allocate a new entry */
+ rc = spdk_nvme_qpair_add_cmd_error_injection(&ctrlr, &qpair,
+ SPDK_NVME_OPC_READ, false, 0, 1,
+ SPDK_NVME_SCT_MEDIA_ERROR, SPDK_NVME_SC_UNRECOVERED_READ_ERROR);
+
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&qpair.err_cmd_head));
+ CU_ASSERT(TAILQ_NEXT(TAILQ_FIRST(&qpair.err_cmd_head), link) == NULL);
+
+ /* Remove cmd error injection */
+ spdk_nvme_qpair_remove_cmd_error_injection(&ctrlr, &qpair, SPDK_NVME_OPC_READ);
+
+ CU_ASSERT(TAILQ_EMPTY(&qpair.err_cmd_head));
+
+ rc = spdk_nvme_qpair_add_cmd_error_injection(&ctrlr, &qpair,
+ SPDK_NVME_OPC_COMPARE, true, 0, 5,
+ SPDK_NVME_SCT_GENERIC, SPDK_NVME_SC_COMPARE_FAILURE);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(!TAILQ_EMPTY(&qpair.err_cmd_head));
+
+ /* Remove cmd error injection */
+ spdk_nvme_qpair_remove_cmd_error_injection(&ctrlr, &qpair, SPDK_NVME_OPC_COMPARE);
+
+ CU_ASSERT(TAILQ_EMPTY(&qpair.err_cmd_head));
+
+ cleanup_submit_request_test(&qpair);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("nvme_qpair", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (CU_add_test(suite, "test3", test3) == NULL
+ || CU_add_test(suite, "ctrlr_failed", test_ctrlr_failed) == NULL
+ || CU_add_test(suite, "struct_packing", struct_packing) == NULL
+ || CU_add_test(suite, "spdk_nvme_qpair_process_completions",
+ test_nvme_qpair_process_completions) == NULL
+ || CU_add_test(suite, "nvme_completion_is_retry", test_nvme_completion_is_retry) == NULL
+#ifdef DEBUG
+ || CU_add_test(suite, "get_status_string", test_get_status_string) == NULL
+#endif
+ || CU_add_test(suite, "spdk_nvme_qpair_add_cmd_error_injection",
+ test_nvme_qpair_add_cmd_error_injection) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_quirks.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_quirks.c/.gitignore
new file mode 100644
index 00000000..eca86651
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_quirks.c/.gitignore
@@ -0,0 +1 @@
+nvme_quirks_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_quirks.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_quirks.c/Makefile
new file mode 100644
index 00000000..d86887f0
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_quirks.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_quirks_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_quirks.c/nvme_quirks_ut.c b/src/spdk/test/unit/lib/nvme/nvme_quirks.c/nvme_quirks_ut.c
new file mode 100644
index 00000000..95fdd143
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_quirks.c/nvme_quirks_ut.c
@@ -0,0 +1,102 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk_cunit.h"
+
+#include "nvme/nvme_quirks.c"
+
+SPDK_LOG_REGISTER_COMPONENT("nvme", SPDK_LOG_NVME)
+
+static void
+test_nvme_quirks_striping(void)
+{
+ struct spdk_pci_id pci_id = {};
+ uint64_t quirks = 0;
+
+ /* Non-Intel device should not have striping enabled */
+ quirks = nvme_get_quirks(&pci_id);
+ CU_ASSERT((quirks & NVME_INTEL_QUIRK_STRIPING) == 0);
+
+ /* Set the vendor id to Intel, but no device id. No striping. */
+ pci_id.vendor_id = SPDK_PCI_VID_INTEL;
+ quirks = nvme_get_quirks(&pci_id);
+ CU_ASSERT((quirks & NVME_INTEL_QUIRK_STRIPING) == 0);
+
+ /* Device ID 0x0953 should have striping enabled */
+ pci_id.device_id = 0x0953;
+ quirks = nvme_get_quirks(&pci_id);
+ CU_ASSERT((quirks & NVME_INTEL_QUIRK_STRIPING) != 0);
+
+ /* Even if specific subvendor/subdevice ids are set,
+ * striping should be enabled.
+ */
+ pci_id.subvendor_id = SPDK_PCI_VID_INTEL;
+ pci_id.subdevice_id = 0x3704;
+ quirks = nvme_get_quirks(&pci_id);
+ CU_ASSERT((quirks & NVME_INTEL_QUIRK_STRIPING) != 0);
+
+ pci_id.subvendor_id = 1234;
+ pci_id.subdevice_id = 42;
+ quirks = nvme_get_quirks(&pci_id);
+ CU_ASSERT((quirks & NVME_INTEL_QUIRK_STRIPING) != 0);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("nvme_quirks", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "test nvme_quirks striping",
+ test_nvme_quirks_striping) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvme/nvme_rdma.c/.gitignore b/src/spdk/test/unit/lib/nvme/nvme_rdma.c/.gitignore
new file mode 100644
index 00000000..66265b95
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_rdma.c/.gitignore
@@ -0,0 +1 @@
+nvme_rdma_ut
diff --git a/src/spdk/test/unit/lib/nvme/nvme_rdma.c/Makefile b/src/spdk/test/unit/lib/nvme/nvme_rdma.c/Makefile
new file mode 100644
index 00000000..7ea42632
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_rdma.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = nvme_rdma_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvme/nvme_rdma.c/nvme_rdma_ut.c b/src/spdk/test/unit/lib/nvme/nvme_rdma.c/nvme_rdma_ut.c
new file mode 100644
index 00000000..87835ab6
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvme/nvme_rdma.c/nvme_rdma_ut.c
@@ -0,0 +1,298 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+#include "spdk_cunit.h"
+#include "common/lib/test_env.c"
+#include "nvme/nvme_rdma.c"
+
+SPDK_LOG_REGISTER_COMPONENT("nvme", SPDK_LOG_NVME)
+
+DEFINE_STUB(nvme_qpair_submit_request, int, (struct spdk_nvme_qpair *qpair,
+ struct nvme_request *req), 0);
+
+DEFINE_STUB(nvme_qpair_init, int, (struct spdk_nvme_qpair *qpair, uint16_t id,
+ struct spdk_nvme_ctrlr *ctrlr, enum spdk_nvme_qprio qprio, uint32_t num_requests), 0);
+
+DEFINE_STUB_V(nvme_qpair_deinit, (struct spdk_nvme_qpair *qpair));
+
+DEFINE_STUB(nvme_ctrlr_probe, int, (const struct spdk_nvme_transport_id *trid, void *devhandle,
+ spdk_nvme_probe_cb probe_cb, void *cb_ctx), 0);
+
+DEFINE_STUB(nvme_ctrlr_get_cap, int, (struct spdk_nvme_ctrlr *ctrlr,
+ union spdk_nvme_cap_register *cap), 0);
+
+DEFINE_STUB(nvme_ctrlr_get_vs, int, (struct spdk_nvme_ctrlr *ctrlr,
+ union spdk_nvme_vs_register *vs), 0);
+
+DEFINE_STUB_V(nvme_ctrlr_init_cap, (struct spdk_nvme_ctrlr *ctrlr,
+ const union spdk_nvme_cap_register *cap, const union spdk_nvme_vs_register *vs));
+
+DEFINE_STUB(nvme_ctrlr_construct, int, (struct spdk_nvme_ctrlr *ctrlr), 0);
+
+DEFINE_STUB_V(nvme_ctrlr_destruct, (struct spdk_nvme_ctrlr *ctrlr));
+
+DEFINE_STUB(nvme_ctrlr_add_process, int, (struct spdk_nvme_ctrlr *ctrlr, void *devhandle), 0);
+
+DEFINE_STUB_V(nvme_ctrlr_connected, (struct spdk_nvme_ctrlr *ctrlr));
+
+DEFINE_STUB(nvme_ctrlr_cmd_identify, int, (struct spdk_nvme_ctrlr *ctrlr, uint8_t cns,
+ uint16_t cntid, uint32_t nsid, void *payload, size_t payload_size, spdk_nvme_cmd_cb cb_fn,
+ void *cb_arg), 0);
+
+DEFINE_STUB_V(spdk_nvme_ctrlr_get_default_ctrlr_opts, (struct spdk_nvme_ctrlr_opts *opts,
+ size_t opts_size));
+
+DEFINE_STUB_V(nvme_completion_poll_cb, (void *arg, const struct spdk_nvme_cpl *cpl));
+
+DEFINE_STUB(spdk_nvme_ctrlr_get_current_process, struct spdk_nvme_ctrlr_process *,
+ (struct spdk_nvme_ctrlr *ctrlr), NULL);
+
+DEFINE_STUB(spdk_nvme_wait_for_completion, int, (struct spdk_nvme_qpair *qpair,
+ struct nvme_completion_poll_status *status), 0);
+
+DEFINE_STUB(spdk_nvme_wait_for_completion_robust_lock, int, (struct spdk_nvme_qpair *qpair,
+ struct nvme_completion_poll_status *status, pthread_mutex_t *robust_mutex), 0);
+
+DEFINE_STUB(spdk_mem_map_set_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
+ uint64_t size, uint64_t translation), 0);
+
+DEFINE_STUB(spdk_mem_map_clear_translation, int, (struct spdk_mem_map *map, uint64_t vaddr,
+ uint64_t size), 0);
+
+DEFINE_STUB(spdk_mem_map_alloc, struct spdk_mem_map *, (uint64_t default_translation,
+ const struct spdk_mem_map_ops *ops, void *cb_ctx), NULL);
+
+DEFINE_STUB_V(spdk_mem_map_free, (struct spdk_mem_map **pmap));
+
+DEFINE_STUB(nvme_fabric_qpair_connect, int, (struct spdk_nvme_qpair *qpair, uint32_t num_entries),
+ 0);
+
+DEFINE_STUB(nvme_transport_ctrlr_set_reg_4, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
+ uint32_t value), 0);
+
+DEFINE_STUB(nvme_fabric_ctrlr_set_reg_4, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
+ uint32_t value), 0);
+
+DEFINE_STUB(nvme_fabric_ctrlr_set_reg_8, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
+ uint64_t value), 0);
+
+DEFINE_STUB(nvme_fabric_ctrlr_get_reg_4, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
+ uint32_t *value), 0);
+
+DEFINE_STUB(nvme_fabric_ctrlr_get_reg_8, int, (struct spdk_nvme_ctrlr *ctrlr, uint32_t offset,
+ uint64_t *value), 0);
+
+DEFINE_STUB_V(nvme_ctrlr_destruct_finish, (struct spdk_nvme_ctrlr *ctrlr));
+
+DEFINE_STUB(nvme_request_check_timeout, int, (struct nvme_request *req, uint16_t cid,
+ struct spdk_nvme_ctrlr_process *active_proc, uint64_t now_tick), 0);
+
+DEFINE_STUB(nvme_fabric_ctrlr_discover, int, (struct spdk_nvme_ctrlr *ctrlr, void *cb_ctx,
+ spdk_nvme_probe_cb probe_cb), 0);
+
+/* used to mock out having to split an SGL over a memory region */
+uint64_t g_mr_size;
+struct ibv_mr g_nvme_rdma_mr;
+
+uint64_t
+spdk_mem_map_translate(const struct spdk_mem_map *map, uint64_t vaddr, uint64_t *size)
+{
+ if (g_mr_size != 0) {
+ *(uint32_t *)size = g_mr_size;
+ }
+
+ return (uint64_t)&g_nvme_rdma_mr;
+}
+
+struct nvme_rdma_ut_bdev_io {
+ struct iovec iovs[NVME_RDMA_MAX_SGL_DESCRIPTORS];
+ int iovpos;
+};
+
+/* essentially a simplification of bdev_nvme_next_sge and bdev_nvme_reset_sgl */
+static void nvme_rdma_ut_reset_sgl(void *cb_arg, uint32_t offset)
+{
+ struct nvme_rdma_ut_bdev_io *bio = cb_arg;
+ struct iovec *iov;
+
+ for (bio->iovpos = 0; bio->iovpos < NVME_RDMA_MAX_SGL_DESCRIPTORS; bio->iovpos++) {
+ iov = &bio->iovs[bio->iovpos];
+ /* Only provide offsets at the beginning of an iov */
+ if (offset == 0) {
+ break;
+ }
+
+ offset -= iov->iov_len;
+ }
+
+ SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_RDMA_MAX_SGL_DESCRIPTORS);
+}
+
+static int nvme_rdma_ut_next_sge(void *cb_arg, void **address, uint32_t *length)
+{
+ struct nvme_rdma_ut_bdev_io *bio = cb_arg;
+ struct iovec *iov;
+
+ SPDK_CU_ASSERT_FATAL(bio->iovpos < NVME_RDMA_MAX_SGL_DESCRIPTORS);
+
+ iov = &bio->iovs[bio->iovpos];
+
+ *address = iov->iov_base;
+ *length = iov->iov_len;
+ bio->iovpos++;
+
+ return 0;
+}
+
+static void
+test_nvme_rdma_build_sgl_request(void)
+{
+ struct nvme_rdma_qpair rqpair;
+ struct spdk_nvme_ctrlr ctrlr = {0};
+ struct spdk_nvmf_cmd cmd = {{0}};
+ struct spdk_nvme_rdma_req rdma_req = {0};
+ struct nvme_request req = {{0}};
+ struct nvme_rdma_ut_bdev_io bio;
+ struct spdk_nvme_rdma_mr_map rmap = {0};
+ struct spdk_mem_map *map = NULL;
+ uint64_t i;
+ int rc;
+
+ rmap.map = map;
+
+ ctrlr.max_sges = NVME_RDMA_MAX_SGL_DESCRIPTORS;
+
+ rqpair.mr_map = &rmap;
+ rqpair.qpair.ctrlr = &ctrlr;
+ rqpair.cmds = &cmd;
+ cmd.sgl[0].address = 0x1111;
+
+ rdma_req.id = 0;
+ rdma_req.req = &req;
+
+ req.payload.reset_sgl_fn = nvme_rdma_ut_reset_sgl;
+ req.payload.next_sge_fn = nvme_rdma_ut_next_sge;
+ req.payload.contig_or_cb_arg = &bio;
+ req.qpair = &rqpair.qpair;
+
+ g_nvme_rdma_mr.rkey = 1;
+
+ for (i = 0; i < NVME_RDMA_MAX_SGL_DESCRIPTORS; i++) {
+ bio.iovs[i].iov_base = (void *)i;
+ bio.iovs[i].iov_len = 0;
+ }
+
+ /* Test case 1: single SGL. Expected: PASS */
+ bio.iovpos = 0;
+ req.payload_offset = 0;
+ req.payload_size = 0x1000;
+ bio.iovs[0].iov_len = 0x1000;
+ rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(bio.iovpos == 1);
+ CU_ASSERT(req.cmd.dptr.sgl1.keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
+ CU_ASSERT(req.cmd.dptr.sgl1.keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
+ CU_ASSERT(req.cmd.dptr.sgl1.keyed.length == req.payload_size);
+ CU_ASSERT(req.cmd.dptr.sgl1.keyed.key == g_nvme_rdma_mr.rkey);
+ CU_ASSERT(req.cmd.dptr.sgl1.address == (uint64_t)bio.iovs[0].iov_base);
+ CU_ASSERT(rdma_req.send_sgl[0].length == sizeof(struct spdk_nvme_cmd));
+
+ /* Test case 2: multiple SGL. Expected: PASS */
+ bio.iovpos = 0;
+ req.payload_offset = 0;
+ req.payload_size = 0x4000;
+ for (i = 0; i < 4; i++) {
+ bio.iovs[i].iov_len = 0x1000;
+ }
+ rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+ CU_ASSERT(bio.iovpos == 4);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.type == SPDK_NVME_SGL_TYPE_LAST_SEGMENT);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.subtype == SPDK_NVME_SGL_SUBTYPE_OFFSET);
+ CU_ASSERT(req.cmd.dptr.sgl1.unkeyed.length == 4 * sizeof(struct spdk_nvme_sgl_descriptor));
+ CU_ASSERT(req.cmd.dptr.sgl1.address == (uint64_t)0);
+ CU_ASSERT(rdma_req.send_sgl[0].length == 4 * sizeof(struct spdk_nvme_sgl_descriptor) + sizeof(
+ struct spdk_nvme_cmd))
+ for (i = 0; i < 4; i++) {
+ CU_ASSERT(cmd.sgl[i].keyed.type == SPDK_NVME_SGL_TYPE_KEYED_DATA_BLOCK);
+ CU_ASSERT(cmd.sgl[i].keyed.subtype == SPDK_NVME_SGL_SUBTYPE_ADDRESS);
+ CU_ASSERT(cmd.sgl[i].keyed.length == bio.iovs[i].iov_len);
+ CU_ASSERT(cmd.sgl[i].keyed.key == g_nvme_rdma_mr.rkey);
+ CU_ASSERT(cmd.sgl[i].address == (uint64_t)bio.iovs[i].iov_base);
+ }
+
+ /* Test case 3: Multiple SGL, SGL larger than mr size. Expected: FAIL */
+ bio.iovpos = 0;
+ req.payload_offset = 0;
+ g_mr_size = 0x500;
+ rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
+ SPDK_CU_ASSERT_FATAL(rc != 0);
+ CU_ASSERT(bio.iovpos == 1);
+
+ /* Test case 4: Multiple SGL, SGL size smaller than I/O size */
+ bio.iovpos = 0;
+ req.payload_offset = 0;
+ req.payload_size = 0x6000;
+ g_mr_size = 0x0;
+ rc = nvme_rdma_build_sgl_request(&rqpair, &rdma_req);
+ SPDK_CU_ASSERT_FATAL(rc != 0);
+ CU_ASSERT(bio.iovpos == NVME_RDMA_MAX_SGL_DESCRIPTORS);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("nvme_rdma", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (CU_add_test(suite, "build_sgl_request", test_nvme_rdma_build_sgl_request) == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvmf/Makefile b/src/spdk/test/unit/lib/nvmf/Makefile
new file mode 100644
index 00000000..0b02f8ba
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = request.c ctrlr.c subsystem.c ctrlr_discovery.c ctrlr_bdev.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/nvmf/ctrlr.c/.gitignore b/src/spdk/test/unit/lib/nvmf/ctrlr.c/.gitignore
new file mode 100644
index 00000000..65e84943
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/ctrlr.c/.gitignore
@@ -0,0 +1 @@
+ctrlr_ut
diff --git a/src/spdk/test/unit/lib/nvmf/ctrlr.c/Makefile b/src/spdk/test/unit/lib/nvmf/ctrlr.c/Makefile
new file mode 100644
index 00000000..c68c589a
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/ctrlr.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = ctrlr_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvmf/ctrlr.c/ctrlr_ut.c b/src/spdk/test/unit/lib/nvmf/ctrlr.c/ctrlr_ut.c
new file mode 100644
index 00000000..71555e32
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/ctrlr.c/ctrlr_ut.c
@@ -0,0 +1,797 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+#include "spdk_internal/mock.h"
+
+#include "common/lib/test_env.c"
+#include "nvmf/ctrlr.c"
+
+SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF)
+
+struct spdk_bdev {
+ int ut_mock;
+ uint64_t blockcnt;
+};
+
+DEFINE_STUB(spdk_nvmf_tgt_find_subsystem,
+ struct spdk_nvmf_subsystem *,
+ (struct spdk_nvmf_tgt *tgt, const char *subnqn),
+ NULL);
+
+DEFINE_STUB(spdk_nvmf_poll_group_create,
+ struct spdk_nvmf_poll_group *,
+ (struct spdk_nvmf_tgt *tgt),
+ NULL);
+
+DEFINE_STUB_V(spdk_nvmf_poll_group_destroy,
+ (struct spdk_nvmf_poll_group *group));
+
+DEFINE_STUB_V(spdk_nvmf_transport_qpair_fini,
+ (struct spdk_nvmf_qpair *qpair));
+
+DEFINE_STUB(spdk_nvmf_poll_group_add,
+ int,
+ (struct spdk_nvmf_poll_group *group, struct spdk_nvmf_qpair *qpair),
+ 0);
+
+DEFINE_STUB(spdk_nvmf_subsystem_get_sn,
+ const char *,
+ (const struct spdk_nvmf_subsystem *subsystem),
+ NULL);
+
+DEFINE_STUB(spdk_nvmf_subsystem_get_ns,
+ struct spdk_nvmf_ns *,
+ (struct spdk_nvmf_subsystem *subsystem, uint32_t nsid),
+ NULL);
+
+DEFINE_STUB(spdk_nvmf_subsystem_get_first_ns,
+ struct spdk_nvmf_ns *,
+ (struct spdk_nvmf_subsystem *subsystem),
+ NULL);
+
+DEFINE_STUB(spdk_nvmf_subsystem_get_next_ns,
+ struct spdk_nvmf_ns *,
+ (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns),
+ NULL);
+
+DEFINE_STUB(spdk_nvmf_subsystem_host_allowed,
+ bool,
+ (struct spdk_nvmf_subsystem *subsystem, const char *hostnqn),
+ true);
+
+DEFINE_STUB(spdk_nvmf_subsystem_add_ctrlr,
+ int,
+ (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr),
+ 0);
+
+DEFINE_STUB_V(spdk_nvmf_subsystem_remove_ctrlr,
+ (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ctrlr *ctrlr));
+
+DEFINE_STUB(spdk_nvmf_subsystem_get_ctrlr,
+ struct spdk_nvmf_ctrlr *,
+ (struct spdk_nvmf_subsystem *subsystem, uint16_t cntlid),
+ NULL);
+
+DEFINE_STUB(spdk_nvmf_ctrlr_dsm_supported,
+ bool,
+ (struct spdk_nvmf_ctrlr *ctrlr),
+ false);
+
+DEFINE_STUB(spdk_nvmf_ctrlr_write_zeroes_supported,
+ bool,
+ (struct spdk_nvmf_ctrlr *ctrlr),
+ false);
+
+DEFINE_STUB_V(spdk_nvmf_get_discovery_log_page,
+ (struct spdk_nvmf_tgt *tgt, void *buffer, uint64_t offset, uint32_t length));
+
+DEFINE_STUB(spdk_nvmf_request_complete,
+ int,
+ (struct spdk_nvmf_request *req),
+ -1);
+
+DEFINE_STUB(spdk_nvmf_request_free,
+ int,
+ (struct spdk_nvmf_request *req),
+ -1);
+
+DEFINE_STUB(spdk_nvmf_qpair_get_listen_trid,
+ int,
+ (struct spdk_nvmf_qpair *qpair, struct spdk_nvme_transport_id *trid),
+ 0);
+
+DEFINE_STUB(spdk_nvmf_subsystem_listener_allowed,
+ bool,
+ (struct spdk_nvmf_subsystem *subsystem, struct spdk_nvme_transport_id *trid),
+ true);
+
+static void
+ctrlr_ut_pass_msg(spdk_thread_fn fn, void *ctx, void *thread_ctx)
+{
+ fn(ctx);
+}
+
+void
+spdk_nvmf_bdev_ctrlr_identify_ns(struct spdk_nvmf_ns *ns, struct spdk_nvme_ns_data *nsdata)
+{
+ uint64_t num_blocks;
+
+ SPDK_CU_ASSERT_FATAL(ns->bdev != NULL);
+ num_blocks = ns->bdev->blockcnt;
+ nsdata->nsze = num_blocks;
+ nsdata->ncap = num_blocks;
+ nsdata->nuse = num_blocks;
+ nsdata->nlbaf = 0;
+ nsdata->flbas.format = 0;
+ nsdata->lbaf[0].lbads = spdk_u32log2(512);
+}
+
+static void
+test_get_log_page(void)
+{
+ struct spdk_nvmf_subsystem subsystem = {};
+ struct spdk_nvmf_request req = {};
+ struct spdk_nvmf_qpair qpair = {};
+ struct spdk_nvmf_ctrlr ctrlr = {};
+ union nvmf_h2c_msg cmd = {};
+ union nvmf_c2h_msg rsp = {};
+ char data[4096];
+
+ subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
+
+ ctrlr.subsys = &subsystem;
+
+ qpair.ctrlr = &ctrlr;
+
+ req.qpair = &qpair;
+ req.cmd = &cmd;
+ req.rsp = &rsp;
+ req.data = &data;
+ req.length = sizeof(data);
+
+ /* Get Log Page - all valid */
+ memset(&cmd, 0, sizeof(cmd));
+ memset(&rsp, 0, sizeof(rsp));
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
+ cmd.nvme_cmd.cdw10 = SPDK_NVME_LOG_ERROR | (req.length / 4 - 1) << 16;
+ CU_ASSERT(spdk_nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
+
+ /* Get Log Page with invalid log ID */
+ memset(&cmd, 0, sizeof(cmd));
+ memset(&rsp, 0, sizeof(rsp));
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
+ cmd.nvme_cmd.cdw10 = 0;
+ CU_ASSERT(spdk_nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
+
+ /* Get Log Page with invalid offset (not dword aligned) */
+ memset(&cmd, 0, sizeof(cmd));
+ memset(&rsp, 0, sizeof(rsp));
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
+ cmd.nvme_cmd.cdw10 = SPDK_NVME_LOG_ERROR | (req.length / 4 - 1) << 16;
+ cmd.nvme_cmd.cdw12 = 2;
+ CU_ASSERT(spdk_nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
+
+ /* Get Log Page without data buffer */
+ memset(&cmd, 0, sizeof(cmd));
+ memset(&rsp, 0, sizeof(rsp));
+ req.data = NULL;
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_GET_LOG_PAGE;
+ cmd.nvme_cmd.cdw10 = SPDK_NVME_LOG_ERROR | (req.length / 4 - 1) << 16;
+ CU_ASSERT(spdk_nvmf_ctrlr_get_log_page(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(req.rsp->nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(req.rsp->nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
+ req.data = data;
+}
+
+static void
+test_process_fabrics_cmd(void)
+{
+ struct spdk_nvmf_request req = {};
+ int ret;
+ struct spdk_nvmf_qpair req_qpair = {};
+ union nvmf_h2c_msg req_cmd = {};
+ union nvmf_c2h_msg req_rsp = {};
+
+ req.qpair = &req_qpair;
+ req.cmd = &req_cmd;
+ req.rsp = &req_rsp;
+ req.qpair->ctrlr = NULL;
+
+ /* No ctrlr and invalid command check */
+ req.cmd->nvmf_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_PROPERTY_GET;
+ ret = spdk_nvmf_ctrlr_process_fabrics_cmd(&req);
+ CU_ASSERT_EQUAL(req.rsp->nvme_cpl.status.sc, SPDK_NVME_SC_COMMAND_SEQUENCE_ERROR);
+ CU_ASSERT_EQUAL(ret, SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+}
+
+static bool
+nvme_status_success(const struct spdk_nvme_status *status)
+{
+ return status->sct == SPDK_NVME_SCT_GENERIC && status->sc == SPDK_NVME_SC_SUCCESS;
+}
+
+static void
+test_connect(void)
+{
+ struct spdk_nvmf_fabric_connect_data connect_data;
+ struct spdk_thread *thread;
+ struct spdk_nvmf_poll_group group;
+ struct spdk_nvmf_transport transport;
+ struct spdk_nvmf_subsystem subsystem;
+ struct spdk_nvmf_request req;
+ struct spdk_nvmf_qpair admin_qpair;
+ struct spdk_nvmf_qpair qpair;
+ struct spdk_nvmf_qpair qpair2;
+ struct spdk_nvmf_ctrlr ctrlr;
+ struct spdk_nvmf_tgt tgt;
+ union nvmf_h2c_msg cmd;
+ union nvmf_c2h_msg rsp;
+ const uint8_t hostid[16] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
+ };
+ const char subnqn[] = "nqn.2016-06.io.spdk:subsystem1";
+ const char hostnqn[] = "nqn.2016-06.io.spdk:host1";
+ int rc;
+
+ thread = spdk_allocate_thread(ctrlr_ut_pass_msg, NULL, NULL, NULL, "ctrlr_ut");
+ SPDK_CU_ASSERT_FATAL(thread != NULL);
+
+ memset(&group, 0, sizeof(group));
+ group.thread = thread;
+
+ memset(&ctrlr, 0, sizeof(ctrlr));
+ ctrlr.subsys = &subsystem;
+ ctrlr.qpair_mask = spdk_bit_array_create(3);
+ SPDK_CU_ASSERT_FATAL(ctrlr.qpair_mask != NULL);
+ ctrlr.vcprop.cc.bits.en = 1;
+ ctrlr.vcprop.cc.bits.iosqes = 6;
+ ctrlr.vcprop.cc.bits.iocqes = 4;
+
+ memset(&admin_qpair, 0, sizeof(admin_qpair));
+ admin_qpair.group = &group;
+
+ memset(&tgt, 0, sizeof(tgt));
+ memset(&transport, 0, sizeof(transport));
+ transport.opts.max_queue_depth = 64;
+ transport.opts.max_qpairs_per_ctrlr = 3;
+ transport.tgt = &tgt;
+
+ memset(&qpair, 0, sizeof(qpair));
+ qpair.transport = &transport;
+ qpair.group = &group;
+
+ memset(&connect_data, 0, sizeof(connect_data));
+ memcpy(connect_data.hostid, hostid, sizeof(hostid));
+ connect_data.cntlid = 0xFFFF;
+ snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
+ snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);
+
+ memset(&subsystem, 0, sizeof(subsystem));
+ subsystem.thread = thread;
+ subsystem.id = 1;
+ TAILQ_INIT(&subsystem.ctrlrs);
+ subsystem.tgt = &tgt;
+ subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
+ snprintf(subsystem.subnqn, sizeof(subsystem.subnqn), "%s", subnqn);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.connect_cmd.opcode = SPDK_NVME_OPC_FABRIC;
+ cmd.connect_cmd.cid = 1;
+ cmd.connect_cmd.fctype = SPDK_NVMF_FABRIC_COMMAND_CONNECT;
+ cmd.connect_cmd.recfmt = 0;
+ cmd.connect_cmd.qid = 0;
+ cmd.connect_cmd.sqsize = 31;
+ cmd.connect_cmd.cattr = 0;
+ cmd.connect_cmd.kato = 120000;
+
+ memset(&req, 0, sizeof(req));
+ req.qpair = &qpair;
+ req.length = sizeof(connect_data);
+ req.xfer = SPDK_NVME_DATA_HOST_TO_CONTROLLER;
+ req.data = &connect_data;
+ req.cmd = &cmd;
+ req.rsp = &rsp;
+
+ MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);
+ MOCK_SET(spdk_nvmf_poll_group_create, &group);
+
+ /* Valid admin connect command */
+ memset(&rsp, 0, sizeof(rsp));
+ rc = spdk_nvmf_ctrlr_connect(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
+ CU_ASSERT(qpair.ctrlr != NULL);
+ spdk_bit_array_free(&qpair.ctrlr->qpair_mask);
+ free(qpair.ctrlr);
+ qpair.ctrlr = NULL;
+
+ /* Invalid data length */
+ memset(&rsp, 0, sizeof(rsp));
+ req.length = sizeof(connect_data) - 1;
+ rc = spdk_nvmf_ctrlr_connect(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_FIELD);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ req.length = sizeof(connect_data);
+
+ /* Invalid recfmt */
+ memset(&rsp, 0, sizeof(rsp));
+ cmd.connect_cmd.recfmt = 1234;
+ rc = spdk_nvmf_ctrlr_connect(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INCOMPATIBLE_FORMAT);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ cmd.connect_cmd.recfmt = 0;
+
+ /* Unterminated subnqn */
+ memset(&rsp, 0, sizeof(rsp));
+ memset(connect_data.subnqn, 'a', sizeof(connect_data.subnqn));
+ rc = spdk_nvmf_ctrlr_connect(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 256);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ snprintf(connect_data.subnqn, sizeof(connect_data.subnqn), "%s", subnqn);
+
+ /* Subsystem not found */
+ memset(&rsp, 0, sizeof(rsp));
+ MOCK_SET(spdk_nvmf_tgt_find_subsystem, NULL);
+ rc = spdk_nvmf_ctrlr_connect(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 256);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ MOCK_SET(spdk_nvmf_tgt_find_subsystem, &subsystem);
+
+ /* Unterminated hostnqn */
+ memset(&rsp, 0, sizeof(rsp));
+ memset(connect_data.hostnqn, 'b', sizeof(connect_data.hostnqn));
+ rc = spdk_nvmf_ctrlr_connect(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 512);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ snprintf(connect_data.hostnqn, sizeof(connect_data.hostnqn), "%s", hostnqn);
+
+ /* Host not allowed */
+ memset(&rsp, 0, sizeof(rsp));
+ MOCK_SET(spdk_nvmf_subsystem_host_allowed, false);
+ rc = spdk_nvmf_ctrlr_connect(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_HOST);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ MOCK_SET(spdk_nvmf_subsystem_host_allowed, true);
+
+ /* Invalid sqsize == 0 */
+ memset(&rsp, 0, sizeof(rsp));
+ cmd.connect_cmd.sqsize = 0;
+ rc = spdk_nvmf_ctrlr_connect(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ cmd.connect_cmd.sqsize = 31;
+
+ /* Invalid sqsize > max_queue_depth */
+ memset(&rsp, 0, sizeof(rsp));
+ cmd.connect_cmd.sqsize = 64;
+ rc = spdk_nvmf_ctrlr_connect(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 44);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ cmd.connect_cmd.sqsize = 31;
+
+ /* Invalid cntlid for admin queue */
+ memset(&rsp, 0, sizeof(rsp));
+ connect_data.cntlid = 0x1234;
+ rc = spdk_nvmf_ctrlr_connect(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ connect_data.cntlid = 0xFFFF;
+
+ ctrlr.admin_qpair = &admin_qpair;
+ ctrlr.subsys = &subsystem;
+
+ /* Valid I/O queue connect command */
+ memset(&rsp, 0, sizeof(rsp));
+ MOCK_SET(spdk_nvmf_subsystem_get_ctrlr, &ctrlr);
+ cmd.connect_cmd.qid = 1;
+ rc = spdk_nvmf_ctrlr_connect(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(nvme_status_success(&rsp.nvme_cpl.status));
+ CU_ASSERT(qpair.ctrlr == &ctrlr);
+ qpair.ctrlr = NULL;
+
+ /* Non-existent controller */
+ memset(&rsp, 0, sizeof(rsp));
+ MOCK_SET(spdk_nvmf_subsystem_get_ctrlr, NULL);
+ rc = spdk_nvmf_ctrlr_connect(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 1);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 16);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ MOCK_SET(spdk_nvmf_subsystem_get_ctrlr, &ctrlr);
+
+ /* I/O connect to discovery controller */
+ memset(&rsp, 0, sizeof(rsp));
+ subsystem.subtype = SPDK_NVMF_SUBTYPE_DISCOVERY;
+ rc = spdk_nvmf_ctrlr_connect(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
+
+ /* I/O connect to disabled controller */
+ memset(&rsp, 0, sizeof(rsp));
+ ctrlr.vcprop.cc.bits.en = 0;
+ rc = spdk_nvmf_ctrlr_connect(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ ctrlr.vcprop.cc.bits.en = 1;
+
+ /* I/O connect with invalid IOSQES */
+ memset(&rsp, 0, sizeof(rsp));
+ ctrlr.vcprop.cc.bits.iosqes = 3;
+ rc = spdk_nvmf_ctrlr_connect(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ ctrlr.vcprop.cc.bits.iosqes = 6;
+
+ /* I/O connect with invalid IOCQES */
+ memset(&rsp, 0, sizeof(rsp));
+ ctrlr.vcprop.cc.bits.iocqes = 3;
+ rc = spdk_nvmf_ctrlr_connect(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVMF_FABRIC_SC_INVALID_PARAM);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.iattr == 0);
+ CU_ASSERT(rsp.connect_rsp.status_code_specific.invalid.ipo == 42);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ ctrlr.vcprop.cc.bits.iocqes = 4;
+
+ /* I/O connect with too many existing qpairs */
+ memset(&rsp, 0, sizeof(rsp));
+ spdk_bit_array_set(ctrlr.qpair_mask, 0);
+ spdk_bit_array_set(ctrlr.qpair_mask, 1);
+ spdk_bit_array_set(ctrlr.qpair_mask, 2);
+ rc = spdk_nvmf_ctrlr_connect(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
+ CU_ASSERT(qpair.ctrlr == NULL);
+ spdk_bit_array_clear(ctrlr.qpair_mask, 0);
+ spdk_bit_array_clear(ctrlr.qpair_mask, 1);
+ spdk_bit_array_clear(ctrlr.qpair_mask, 2);
+
+ /* I/O connect with duplicate queue ID */
+ memset(&rsp, 0, sizeof(rsp));
+ memset(&qpair2, 0, sizeof(qpair2));
+ qpair2.group = &group;
+ qpair2.qid = 1;
+ spdk_bit_array_set(ctrlr.qpair_mask, 1);
+ cmd.connect_cmd.qid = 1;
+ rc = spdk_nvmf_ctrlr_connect(&req);
+ CU_ASSERT(rc == SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_COMMAND_SPECIFIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_QUEUE_IDENTIFIER);
+ CU_ASSERT(qpair.ctrlr == NULL);
+
+ /* Clean up globals */
+ MOCK_CLEAR(spdk_nvmf_tgt_find_subsystem);
+ MOCK_CLEAR(spdk_nvmf_poll_group_create);
+
+ spdk_bit_array_free(&ctrlr.qpair_mask);
+ spdk_free_thread();
+}
+
+static void
+test_get_ns_id_desc_list(void)
+{
+ struct spdk_nvmf_subsystem subsystem;
+ struct spdk_nvmf_qpair qpair;
+ struct spdk_nvmf_ctrlr ctrlr;
+ struct spdk_nvmf_request req;
+ struct spdk_nvmf_ns *ns_ptrs[1];
+ struct spdk_nvmf_ns ns;
+ union nvmf_h2c_msg cmd;
+ union nvmf_c2h_msg rsp;
+ struct spdk_bdev bdev;
+ uint8_t buf[4096];
+
+ memset(&subsystem, 0, sizeof(subsystem));
+ ns_ptrs[0] = &ns;
+ subsystem.ns = ns_ptrs;
+ subsystem.max_nsid = 1;
+ subsystem.subtype = SPDK_NVMF_SUBTYPE_NVME;
+
+ memset(&ns, 0, sizeof(ns));
+ ns.opts.nsid = 1;
+ ns.bdev = &bdev;
+
+ memset(&qpair, 0, sizeof(qpair));
+ qpair.ctrlr = &ctrlr;
+
+ memset(&ctrlr, 0, sizeof(ctrlr));
+ ctrlr.subsys = &subsystem;
+ ctrlr.vcprop.cc.bits.en = 1;
+
+ memset(&req, 0, sizeof(req));
+ req.qpair = &qpair;
+ req.cmd = &cmd;
+ req.rsp = &rsp;
+ req.xfer = SPDK_NVME_DATA_CONTROLLER_TO_HOST;
+ req.data = buf;
+ req.length = sizeof(buf);
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.nvme_cmd.opc = SPDK_NVME_OPC_IDENTIFY;
+ cmd.nvme_cmd.cdw10 = SPDK_NVME_IDENTIFY_NS_ID_DESCRIPTOR_LIST;
+
+ /* Invalid NSID */
+ cmd.nvme_cmd.nsid = 0;
+ memset(&rsp, 0, sizeof(rsp));
+ CU_ASSERT(spdk_nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
+
+ /* Valid NSID, but ns has no IDs defined */
+ cmd.nvme_cmd.nsid = 1;
+ memset(&rsp, 0, sizeof(rsp));
+ CU_ASSERT(spdk_nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
+ CU_ASSERT(spdk_mem_all_zero(buf, sizeof(buf)));
+
+ /* Valid NSID, only EUI64 defined */
+ ns.opts.eui64[0] = 0x11;
+ ns.opts.eui64[7] = 0xFF;
+ memset(&rsp, 0, sizeof(rsp));
+ CU_ASSERT(spdk_nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
+ CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
+ CU_ASSERT(buf[1] == 8);
+ CU_ASSERT(buf[4] == 0x11);
+ CU_ASSERT(buf[11] == 0xFF);
+ CU_ASSERT(buf[13] == 0);
+
+ /* Valid NSID, only NGUID defined */
+ memset(ns.opts.eui64, 0, sizeof(ns.opts.eui64));
+ ns.opts.nguid[0] = 0x22;
+ ns.opts.nguid[15] = 0xEE;
+ memset(&rsp, 0, sizeof(rsp));
+ CU_ASSERT(spdk_nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
+ CU_ASSERT(buf[0] == SPDK_NVME_NIDT_NGUID);
+ CU_ASSERT(buf[1] == 16);
+ CU_ASSERT(buf[4] == 0x22);
+ CU_ASSERT(buf[19] == 0xEE);
+ CU_ASSERT(buf[21] == 0);
+
+ /* Valid NSID, both EUI64 and NGUID defined */
+ ns.opts.eui64[0] = 0x11;
+ ns.opts.eui64[7] = 0xFF;
+ ns.opts.nguid[0] = 0x22;
+ ns.opts.nguid[15] = 0xEE;
+ memset(&rsp, 0, sizeof(rsp));
+ CU_ASSERT(spdk_nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
+ CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
+ CU_ASSERT(buf[1] == 8);
+ CU_ASSERT(buf[4] == 0x11);
+ CU_ASSERT(buf[11] == 0xFF);
+ CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
+ CU_ASSERT(buf[13] == 16);
+ CU_ASSERT(buf[16] == 0x22);
+ CU_ASSERT(buf[31] == 0xEE);
+ CU_ASSERT(buf[33] == 0);
+
+ /* Valid NSID, EUI64, NGUID, and UUID defined */
+ ns.opts.eui64[0] = 0x11;
+ ns.opts.eui64[7] = 0xFF;
+ ns.opts.nguid[0] = 0x22;
+ ns.opts.nguid[15] = 0xEE;
+ ns.opts.uuid.u.raw[0] = 0x33;
+ ns.opts.uuid.u.raw[15] = 0xDD;
+ memset(&rsp, 0, sizeof(rsp));
+ CU_ASSERT(spdk_nvmf_ctrlr_process_admin_cmd(&req) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.nvme_cpl.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.nvme_cpl.status.sc == SPDK_NVME_SC_SUCCESS);
+ CU_ASSERT(buf[0] == SPDK_NVME_NIDT_EUI64);
+ CU_ASSERT(buf[1] == 8);
+ CU_ASSERT(buf[4] == 0x11);
+ CU_ASSERT(buf[11] == 0xFF);
+ CU_ASSERT(buf[12] == SPDK_NVME_NIDT_NGUID);
+ CU_ASSERT(buf[13] == 16);
+ CU_ASSERT(buf[16] == 0x22);
+ CU_ASSERT(buf[31] == 0xEE);
+ CU_ASSERT(buf[32] == SPDK_NVME_NIDT_UUID);
+ CU_ASSERT(buf[33] == 16);
+ CU_ASSERT(buf[36] == 0x33);
+ CU_ASSERT(buf[51] == 0xDD);
+ CU_ASSERT(buf[53] == 0);
+}
+
+static void
+test_identify_ns(void)
+{
+ struct spdk_nvmf_subsystem subsystem = {};
+ struct spdk_nvmf_transport transport = {};
+ struct spdk_nvmf_qpair admin_qpair = { .transport = &transport};
+ struct spdk_nvmf_ctrlr ctrlr = { .subsys = &subsystem, .admin_qpair = &admin_qpair };
+ struct spdk_nvme_cmd cmd = {};
+ struct spdk_nvme_cpl rsp = {};
+ struct spdk_nvme_ns_data nsdata = {};
+ struct spdk_bdev bdev[3] = {{.blockcnt = 1234}, {.blockcnt = 0}, {.blockcnt = 5678}};
+ struct spdk_nvmf_ns ns[3] = {{.bdev = &bdev[0]}, {.bdev = NULL}, {.bdev = &bdev[2]}};
+ struct spdk_nvmf_ns *ns_arr[3] = {&ns[0], NULL, &ns[2]};
+
+ subsystem.ns = ns_arr;
+ subsystem.max_nsid = SPDK_COUNTOF(ns_arr);
+
+ /* Invalid NSID 0 */
+ cmd.nsid = 0;
+ memset(&nsdata, 0, sizeof(nsdata));
+ memset(&rsp, 0, sizeof(rsp));
+ CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
+ &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
+ CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
+
+ /* Valid NSID 1 */
+ cmd.nsid = 1;
+ memset(&nsdata, 0, sizeof(nsdata));
+ memset(&rsp, 0, sizeof(rsp));
+ CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
+ &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
+ CU_ASSERT(nsdata.nsze == 1234);
+
+ /* Valid but inactive NSID 2 */
+ cmd.nsid = 2;
+ memset(&nsdata, 0, sizeof(nsdata));
+ memset(&rsp, 0, sizeof(rsp));
+ CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
+ &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
+ CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
+
+ /* Valid NSID 3 */
+ cmd.nsid = 3;
+ memset(&nsdata, 0, sizeof(nsdata));
+ memset(&rsp, 0, sizeof(rsp));
+ CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
+ &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_SUCCESS);
+ CU_ASSERT(nsdata.nsze == 5678);
+
+ /* Invalid NSID 4 */
+ cmd.nsid = 4;
+ memset(&nsdata, 0, sizeof(nsdata));
+ memset(&rsp, 0, sizeof(rsp));
+ CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
+ &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
+ CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
+
+ /* Invalid NSID 0xFFFFFFFF (NS management not supported) */
+ cmd.nsid = 0xFFFFFFFF;
+ memset(&nsdata, 0, sizeof(nsdata));
+ memset(&rsp, 0, sizeof(rsp));
+ CU_ASSERT(spdk_nvmf_ctrlr_identify_ns(&ctrlr, &cmd, &rsp,
+ &nsdata) == SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE);
+ CU_ASSERT(rsp.status.sct == SPDK_NVME_SCT_GENERIC);
+ CU_ASSERT(rsp.status.sc == SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
+ CU_ASSERT(spdk_mem_all_zero(&nsdata, sizeof(nsdata)));
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("nvmf", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "get_log_page", test_get_log_page) == NULL ||
+ CU_add_test(suite, "process_fabrics_cmd", test_process_fabrics_cmd) == NULL ||
+ CU_add_test(suite, "connect", test_connect) == NULL ||
+ CU_add_test(suite, "get_ns_id_desc_list", test_get_ns_id_desc_list) == NULL ||
+ CU_add_test(suite, "identify_ns", test_identify_ns) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvmf/ctrlr_bdev.c/.gitignore b/src/spdk/test/unit/lib/nvmf/ctrlr_bdev.c/.gitignore
new file mode 100644
index 00000000..78fca101
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/ctrlr_bdev.c/.gitignore
@@ -0,0 +1 @@
+ctrlr_bdev_ut
diff --git a/src/spdk/test/unit/lib/nvmf/ctrlr_bdev.c/Makefile b/src/spdk/test/unit/lib/nvmf/ctrlr_bdev.c/Makefile
new file mode 100644
index 00000000..1d22f14b
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/ctrlr_bdev.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = ctrlr_bdev_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvmf/ctrlr_bdev.c/ctrlr_bdev_ut.c b/src/spdk/test/unit/lib/nvmf/ctrlr_bdev.c/ctrlr_bdev_ut.c
new file mode 100644
index 00000000..1085e4d7
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/ctrlr_bdev.c/ctrlr_bdev_ut.c
@@ -0,0 +1,260 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "nvmf/ctrlr_bdev.c"
+
+
+SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF)
+
+int
+spdk_nvmf_request_complete(struct spdk_nvmf_request *req)
+{
+ return -1;
+}
+
+const char *
+spdk_bdev_get_name(const struct spdk_bdev *bdev)
+{
+ return "test";
+}
+
+uint32_t
+spdk_bdev_get_block_size(const struct spdk_bdev *bdev)
+{
+ abort();
+ return 0;
+}
+
+uint64_t
+spdk_bdev_get_num_blocks(const struct spdk_bdev *bdev)
+{
+ abort();
+ return 0;
+}
+
+uint32_t
+spdk_bdev_get_optimal_io_boundary(const struct spdk_bdev *bdev)
+{
+ abort();
+ return 0;
+}
+
+struct spdk_io_channel *
+spdk_bdev_get_io_channel(struct spdk_bdev_desc *desc)
+{
+ return NULL;
+}
+
+int
+spdk_bdev_flush_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ return 0;
+}
+
+int
+spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ return 0;
+}
+
+bool
+spdk_bdev_io_type_supported(struct spdk_bdev *bdev, enum spdk_bdev_io_type io_type)
+{
+ return false;
+}
+
+int
+spdk_bdev_queue_io_wait(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
+ struct spdk_bdev_io_wait_entry *entry)
+{
+ return 0;
+}
+
+int
+spdk_bdev_write_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, void *buf,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ return 0;
+}
+
+int
+spdk_bdev_writev_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *iov, int iovcnt,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ return 0;
+}
+
+int
+spdk_bdev_read_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch, void *buf,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ return 0;
+}
+
+int spdk_bdev_readv_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *iov, int iovcnt,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ return 0;
+}
+
+int
+spdk_bdev_write_zeroes_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ return 0;
+}
+
+int
+spdk_bdev_nvme_io_passthru(struct spdk_bdev_desc *desc,
+ struct spdk_io_channel *ch,
+ const struct spdk_nvme_cmd *cmd,
+ void *buf, size_t nbytes,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ return 0;
+}
+
+void spdk_bdev_free_io(struct spdk_bdev_io *bdev_io)
+{
+}
+
+const char *spdk_nvmf_subsystem_get_nqn(struct spdk_nvmf_subsystem *subsystem)
+{
+ return NULL;
+}
+
+struct spdk_nvmf_ns *
+spdk_nvmf_subsystem_get_ns(struct spdk_nvmf_subsystem *subsystem, uint32_t nsid)
+{
+ abort();
+ return NULL;
+}
+
+struct spdk_nvmf_ns *
+spdk_nvmf_subsystem_get_first_ns(struct spdk_nvmf_subsystem *subsystem)
+{
+ abort();
+ return NULL;
+}
+
+struct spdk_nvmf_ns *
+spdk_nvmf_subsystem_get_next_ns(struct spdk_nvmf_subsystem *subsystem, struct spdk_nvmf_ns *prev_ns)
+{
+ abort();
+ return NULL;
+}
+
+void spdk_bdev_io_get_nvme_status(const struct spdk_bdev_io *bdev_io, int *sct, int *sc)
+{
+}
+
+static void
+test_get_rw_params(void)
+{
+ struct spdk_nvme_cmd cmd = {0};
+ uint64_t lba;
+ uint64_t count;
+
+ lba = 0;
+ count = 0;
+ to_le64(&cmd.cdw10, 0x1234567890ABCDEF);
+ to_le32(&cmd.cdw12, 0x9875 | SPDK_NVME_IO_FLAGS_FORCE_UNIT_ACCESS);
+ nvmf_bdev_ctrlr_get_rw_params(&cmd, &lba, &count);
+ CU_ASSERT(lba == 0x1234567890ABCDEF);
+ CU_ASSERT(count == 0x9875 + 1); /* NOTE: this field is 0's based, hence the +1 */
+}
+
+static void
+test_lba_in_range(void)
+{
+ /* Trivial cases (no overflow) */
+ CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 0, 1) == true);
+ CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 0, 1000) == true);
+ CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 0, 1001) == false);
+ CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1, 999) == true);
+ CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1, 1000) == false);
+ CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 999, 1) == true);
+ CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1000, 1) == false);
+ CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(1000, 1001, 1) == false);
+
+ /* Overflow edge cases */
+ CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, 0, UINT64_MAX) == true);
+ CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, 1, UINT64_MAX) == false)
+ CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, UINT64_MAX - 1, 1) == true);
+ CU_ASSERT(nvmf_bdev_ctrlr_lba_in_range(UINT64_MAX, UINT64_MAX, 1) == false);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("nvmf", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "get_rw_params", test_get_rw_params) == NULL ||
+ CU_add_test(suite, "lba_in_range", test_lba_in_range) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvmf/ctrlr_discovery.c/.gitignore b/src/spdk/test/unit/lib/nvmf/ctrlr_discovery.c/.gitignore
new file mode 100644
index 00000000..a975a97e
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/ctrlr_discovery.c/.gitignore
@@ -0,0 +1 @@
+ctrlr_discovery_ut
diff --git a/src/spdk/test/unit/lib/nvmf/ctrlr_discovery.c/Makefile b/src/spdk/test/unit/lib/nvmf/ctrlr_discovery.c/Makefile
new file mode 100644
index 00000000..e56238d2
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/ctrlr_discovery.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = ctrlr_discovery_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvmf/ctrlr_discovery.c/ctrlr_discovery_ut.c b/src/spdk/test/unit/lib/nvmf/ctrlr_discovery.c/ctrlr_discovery_ut.c
new file mode 100644
index 00000000..f86bdf2b
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/ctrlr_discovery.c/ctrlr_discovery_ut.c
@@ -0,0 +1,306 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+#include "spdk_internal/mock.h"
+
+#include "common/lib/test_env.c"
+#include "nvmf/ctrlr_discovery.c"
+#include "nvmf/subsystem.c"
+
+SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF)
+
+DEFINE_STUB(spdk_bdev_module_claim_bdev,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
+ struct spdk_bdev_module *module), 0);
+
+DEFINE_STUB_V(spdk_bdev_module_release_bdev,
+ (struct spdk_bdev *bdev));
+
+uint32_t
+spdk_env_get_current_core(void)
+{
+ return 0;
+}
+
+struct spdk_event *
+spdk_event_allocate(uint32_t core, spdk_event_fn fn, void *arg1, void *arg2)
+{
+ return NULL;
+}
+
+void
+spdk_event_call(struct spdk_event *event)
+{
+
+}
+
+int
+spdk_bdev_open(struct spdk_bdev *bdev, bool write, spdk_bdev_remove_cb_t remove_cb,
+ void *remove_ctx, struct spdk_bdev_desc **desc)
+{
+ return 0;
+}
+
+void
+spdk_bdev_close(struct spdk_bdev_desc *desc)
+{
+}
+
+const char *
+spdk_bdev_get_name(const struct spdk_bdev *bdev)
+{
+ return "test";
+}
+
+const struct spdk_uuid *
+spdk_bdev_get_uuid(const struct spdk_bdev *bdev)
+{
+ return &bdev->uuid;
+}
+
+int
+spdk_nvmf_transport_listen(struct spdk_nvmf_transport *transport,
+ const struct spdk_nvme_transport_id *trid)
+{
+ return 0;
+}
+
+void
+spdk_nvmf_transport_listener_discover(struct spdk_nvmf_transport *transport,
+ struct spdk_nvme_transport_id *trid,
+ struct spdk_nvmf_discovery_log_page_entry *entry)
+{
+ entry->trtype = 42;
+}
+
+static struct spdk_nvmf_transport g_transport = {};
+
+struct spdk_nvmf_transport *
+spdk_nvmf_transport_create(enum spdk_nvme_transport_type type,
+ struct spdk_nvmf_transport_opts *tprt_opts)
+{
+ if (type == SPDK_NVME_TRANSPORT_RDMA) {
+ return &g_transport;
+ }
+
+ return NULL;
+}
+
+struct spdk_nvmf_subsystem *
+spdk_nvmf_tgt_find_subsystem(struct spdk_nvmf_tgt *tgt, const char *subnqn)
+{
+ return NULL;
+}
+
+struct spdk_nvmf_transport *
+spdk_nvmf_tgt_get_transport(struct spdk_nvmf_tgt *tgt, enum spdk_nvme_transport_type trtype)
+{
+ return &g_transport;
+}
+
+bool
+spdk_nvmf_transport_qpair_is_idle(struct spdk_nvmf_qpair *qpair)
+{
+ return false;
+}
+
+int
+spdk_nvme_transport_id_parse_trtype(enum spdk_nvme_transport_type *trtype, const char *str)
+{
+ if (trtype == NULL || str == NULL) {
+ return -EINVAL;
+ }
+
+ if (strcasecmp(str, "PCIe") == 0) {
+ *trtype = SPDK_NVME_TRANSPORT_PCIE;
+ } else if (strcasecmp(str, "RDMA") == 0) {
+ *trtype = SPDK_NVME_TRANSPORT_RDMA;
+ } else {
+ return -ENOENT;
+ }
+ return 0;
+}
+
+int
+spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
+ const struct spdk_nvme_transport_id *trid2)
+{
+ return 0;
+}
+
+void
+spdk_nvmf_ctrlr_ns_changed(struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid)
+{
+}
+
+void
+spdk_nvmf_ctrlr_destruct(struct spdk_nvmf_ctrlr *ctrlr)
+{
+}
+
+int
+spdk_nvmf_poll_group_update_subsystem(struct spdk_nvmf_poll_group *group,
+ struct spdk_nvmf_subsystem *subsystem)
+{
+ return 0;
+}
+
+int
+spdk_nvmf_poll_group_add_subsystem(struct spdk_nvmf_poll_group *group,
+ struct spdk_nvmf_subsystem *subsystem,
+ spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
+{
+ return 0;
+}
+
+void
+spdk_nvmf_poll_group_remove_subsystem(struct spdk_nvmf_poll_group *group,
+ struct spdk_nvmf_subsystem *subsystem,
+ spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
+{
+}
+
+void
+spdk_nvmf_poll_group_pause_subsystem(struct spdk_nvmf_poll_group *group,
+ struct spdk_nvmf_subsystem *subsystem,
+ spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
+{
+}
+
+void
+spdk_nvmf_poll_group_resume_subsystem(struct spdk_nvmf_poll_group *group,
+ struct spdk_nvmf_subsystem *subsystem,
+ spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
+{
+}
+
+static void
+test_discovery_log(void)
+{
+ struct spdk_nvmf_tgt tgt = {};
+ struct spdk_nvmf_subsystem *subsystem;
+ uint8_t buffer[8192];
+ struct spdk_nvmf_discovery_log_page *disc_log;
+ struct spdk_nvmf_discovery_log_page_entry *entry;
+ struct spdk_nvme_transport_id trid = {};
+
+ tgt.opts.max_subsystems = 1024;
+ tgt.subsystems = calloc(tgt.opts.max_subsystems, sizeof(struct spdk_nvmf_subsystem *));
+ SPDK_CU_ASSERT_FATAL(tgt.subsystems != NULL);
+
+ /* Add one subsystem and verify that the discovery log contains it */
+ subsystem = spdk_nvmf_subsystem_create(&tgt, "nqn.2016-06.io.spdk:subsystem1",
+ SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem != NULL);
+
+ trid.trtype = SPDK_NVME_TRANSPORT_RDMA;
+ trid.adrfam = SPDK_NVMF_ADRFAM_IPV4;
+ snprintf(trid.traddr, sizeof(trid.traddr), "1234");
+ snprintf(trid.trsvcid, sizeof(trid.trsvcid), "5678");
+ SPDK_CU_ASSERT_FATAL(spdk_nvmf_subsystem_add_listener(subsystem, &trid) == 0);
+
+ /* Get only genctr (first field in the header) */
+ memset(buffer, 0xCC, sizeof(buffer));
+ disc_log = (struct spdk_nvmf_discovery_log_page *)buffer;
+ spdk_nvmf_get_discovery_log_page(&tgt, buffer, 0, sizeof(disc_log->genctr));
+ CU_ASSERT(disc_log->genctr == 1); /* one added subsystem */
+
+ /* Get only the header, no entries */
+ memset(buffer, 0xCC, sizeof(buffer));
+ disc_log = (struct spdk_nvmf_discovery_log_page *)buffer;
+ spdk_nvmf_get_discovery_log_page(&tgt, buffer, 0, sizeof(*disc_log));
+ CU_ASSERT(disc_log->genctr == 1);
+ CU_ASSERT(disc_log->numrec == 1);
+
+ /* Offset 0, exact size match */
+ memset(buffer, 0xCC, sizeof(buffer));
+ disc_log = (struct spdk_nvmf_discovery_log_page *)buffer;
+ spdk_nvmf_get_discovery_log_page(&tgt, buffer, 0,
+ sizeof(*disc_log) + sizeof(disc_log->entries[0]));
+ CU_ASSERT(disc_log->genctr != 0);
+ CU_ASSERT(disc_log->numrec == 1);
+ CU_ASSERT(disc_log->entries[0].trtype == 42);
+
+ /* Offset 0, oversize buffer */
+ memset(buffer, 0xCC, sizeof(buffer));
+ disc_log = (struct spdk_nvmf_discovery_log_page *)buffer;
+ spdk_nvmf_get_discovery_log_page(&tgt, buffer, 0, sizeof(buffer));
+ CU_ASSERT(disc_log->genctr != 0);
+ CU_ASSERT(disc_log->numrec == 1);
+ CU_ASSERT(disc_log->entries[0].trtype == 42);
+ CU_ASSERT(spdk_mem_all_zero(buffer + sizeof(*disc_log) + sizeof(disc_log->entries[0]),
+ sizeof(buffer) - (sizeof(*disc_log) + sizeof(disc_log->entries[0]))));
+
+ /* Get just the first entry, no header */
+ memset(buffer, 0xCC, sizeof(buffer));
+ entry = (struct spdk_nvmf_discovery_log_page_entry *)buffer;
+ spdk_nvmf_get_discovery_log_page(&tgt, buffer,
+ offsetof(struct spdk_nvmf_discovery_log_page, entries[0]),
+ sizeof(*entry));
+ CU_ASSERT(entry->trtype == 42);
+ spdk_nvmf_subsystem_destroy(subsystem);
+ free(tgt.subsystems);
+ free(tgt.discovery_log_page);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("nvmf", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "discovery_log", test_discovery_log) == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvmf/request.c/.gitignore b/src/spdk/test/unit/lib/nvmf/request.c/.gitignore
new file mode 100644
index 00000000..7f06e410
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/request.c/.gitignore
@@ -0,0 +1 @@
+request_ut
diff --git a/src/spdk/test/unit/lib/nvmf/request.c/Makefile b/src/spdk/test/unit/lib/nvmf/request.c/Makefile
new file mode 100644
index 00000000..0c683cff
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/request.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = request_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvmf/request.c/request_ut.c b/src/spdk/test/unit/lib/nvmf/request.c/request_ut.c
new file mode 100644
index 00000000..bd21fa63
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/request.c/request_ut.c
@@ -0,0 +1,153 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "nvmf/request.c"
+
+SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF)
+
+int
+spdk_nvmf_transport_req_free(struct spdk_nvmf_request *req)
+{
+ return 0;
+}
+
+int
+spdk_nvmf_transport_req_complete(struct spdk_nvmf_request *req)
+{
+ return 0;
+}
+
+int
+spdk_nvmf_ctrlr_process_fabrics_cmd(struct spdk_nvmf_request *req)
+{
+ return -1;
+}
+
+int
+spdk_nvmf_ctrlr_process_admin_cmd(struct spdk_nvmf_request *req)
+{
+ return -1;
+}
+
+int
+spdk_nvmf_ctrlr_process_io_cmd(struct spdk_nvmf_request *req)
+{
+ return -1;
+}
+
+int
+spdk_nvme_ctrlr_cmd_admin_raw(struct spdk_nvme_ctrlr *ctrlr,
+ struct spdk_nvme_cmd *cmd,
+ void *buf, uint32_t len,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ return -1;
+}
+
+int
+spdk_nvme_ctrlr_cmd_io_raw(struct spdk_nvme_ctrlr *ctrlr,
+ struct spdk_nvme_qpair *qpair,
+ struct spdk_nvme_cmd *cmd,
+ void *buf, uint32_t len,
+ spdk_nvme_cmd_cb cb_fn, void *cb_arg)
+{
+ return -1;
+}
+
+uint32_t
+spdk_nvme_ctrlr_get_num_ns(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return 0;
+}
+
+union spdk_nvme_vs_register spdk_nvme_ctrlr_get_regs_vs(struct spdk_nvme_ctrlr *ctrlr)
+{
+ union spdk_nvme_vs_register vs;
+
+ vs.raw = 0;
+ return vs;
+}
+
+bool
+spdk_nvme_ns_is_active(struct spdk_nvme_ns *ns)
+{
+ return false;
+}
+
+struct spdk_nvme_ns *spdk_nvme_ctrlr_get_ns(struct spdk_nvme_ctrlr *ctrlr, uint32_t ns_id)
+{
+ return NULL;
+}
+
+int
+spdk_nvmf_qpair_disconnect(struct spdk_nvmf_qpair *qpair, nvmf_qpair_disconnect_cb cb_fn, void *ctx)
+{
+ return 0;
+}
+
+static void
+test_placeholder(void)
+{
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("nvmf", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "placeholder", test_placeholder) == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/nvmf/subsystem.c/.gitignore b/src/spdk/test/unit/lib/nvmf/subsystem.c/.gitignore
new file mode 100644
index 00000000..76ca0d33
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/subsystem.c/.gitignore
@@ -0,0 +1 @@
+subsystem_ut
diff --git a/src/spdk/test/unit/lib/nvmf/subsystem.c/Makefile b/src/spdk/test/unit/lib/nvmf/subsystem.c/Makefile
new file mode 100644
index 00000000..b62f1ee1
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/subsystem.c/Makefile
@@ -0,0 +1,38 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+
+TEST_FILE = subsystem_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/nvmf/subsystem.c/subsystem_ut.c b/src/spdk/test/unit/lib/nvmf/subsystem.c/subsystem_ut.c
new file mode 100644
index 00000000..1b92efd2
--- /dev/null
+++ b/src/spdk/test/unit/lib/nvmf/subsystem.c/subsystem_ut.c
@@ -0,0 +1,477 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "common/lib/test_env.c"
+#include "spdk_cunit.h"
+#include "spdk_internal/mock.h"
+
+#include "nvmf/subsystem.c"
+
+SPDK_LOG_REGISTER_COMPONENT("nvmf", SPDK_LOG_NVMF)
+
+DEFINE_STUB(spdk_bdev_module_claim_bdev,
+ int,
+ (struct spdk_bdev *bdev, struct spdk_bdev_desc *desc,
+ struct spdk_bdev_module *module), 0);
+
+DEFINE_STUB_V(spdk_bdev_module_release_bdev,
+ (struct spdk_bdev *bdev));
+
+static void
+_subsystem_send_msg(spdk_thread_fn fn, void *ctx, void *thread_ctx)
+{
+ fn(ctx);
+}
+
+static void
+subsystem_ns_remove_cb(struct spdk_nvmf_subsystem *subsystem, void *cb_arg, int status)
+{
+}
+
+uint32_t
+spdk_env_get_current_core(void)
+{
+ return 0;
+}
+
+struct spdk_event *
+spdk_event_allocate(uint32_t core, spdk_event_fn fn, void *arg1, void *arg2)
+{
+ return NULL;
+}
+
+void
+spdk_event_call(struct spdk_event *event)
+{
+
+}
+
+int
+spdk_nvmf_transport_listen(struct spdk_nvmf_transport *transport,
+ const struct spdk_nvme_transport_id *trid)
+{
+ return 0;
+}
+
+void
+spdk_nvmf_transport_listener_discover(struct spdk_nvmf_transport *transport,
+ struct spdk_nvme_transport_id *trid,
+ struct spdk_nvmf_discovery_log_page_entry *entry)
+{
+ entry->trtype = 42;
+}
+
+bool
+spdk_nvmf_transport_qpair_is_idle(struct spdk_nvmf_qpair *qpair)
+{
+ return false;
+}
+
+static struct spdk_nvmf_transport g_transport = {};
+
+struct spdk_nvmf_transport *
+spdk_nvmf_transport_create(enum spdk_nvme_transport_type type,
+ struct spdk_nvmf_transport_opts *tprt_opts)
+{
+ if (type == SPDK_NVME_TRANSPORT_RDMA) {
+ return &g_transport;
+ }
+
+ return NULL;
+}
+
+struct spdk_nvmf_subsystem *
+spdk_nvmf_tgt_find_subsystem(struct spdk_nvmf_tgt *tgt, const char *subnqn)
+{
+ return NULL;
+}
+
+struct spdk_nvmf_transport *
+spdk_nvmf_tgt_get_transport(struct spdk_nvmf_tgt *tgt, enum spdk_nvme_transport_type trtype)
+{
+ if (trtype == SPDK_NVME_TRANSPORT_RDMA) {
+ return &g_transport;
+ }
+
+ return NULL;
+}
+
+int
+spdk_nvmf_poll_group_update_subsystem(struct spdk_nvmf_poll_group *group,
+ struct spdk_nvmf_subsystem *subsystem)
+{
+ return 0;
+}
+
+int
+spdk_nvmf_poll_group_add_subsystem(struct spdk_nvmf_poll_group *group,
+ struct spdk_nvmf_subsystem *subsystem,
+ spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
+{
+ return 0;
+}
+
+void
+spdk_nvmf_poll_group_remove_subsystem(struct spdk_nvmf_poll_group *group,
+ struct spdk_nvmf_subsystem *subsystem,
+ spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
+{
+}
+
+void
+spdk_nvmf_poll_group_pause_subsystem(struct spdk_nvmf_poll_group *group,
+ struct spdk_nvmf_subsystem *subsystem,
+ spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
+{
+}
+
+void
+spdk_nvmf_poll_group_resume_subsystem(struct spdk_nvmf_poll_group *group,
+ struct spdk_nvmf_subsystem *subsystem,
+ spdk_nvmf_poll_group_mod_done cb_fn, void *cb_arg)
+{
+}
+
+int
+spdk_nvme_transport_id_parse_trtype(enum spdk_nvme_transport_type *trtype, const char *str)
+{
+ if (trtype == NULL || str == NULL) {
+ return -EINVAL;
+ }
+
+ if (strcasecmp(str, "PCIe") == 0) {
+ *trtype = SPDK_NVME_TRANSPORT_PCIE;
+ } else if (strcasecmp(str, "RDMA") == 0) {
+ *trtype = SPDK_NVME_TRANSPORT_RDMA;
+ } else {
+ return -ENOENT;
+ }
+ return 0;
+}
+
+int
+spdk_nvme_transport_id_compare(const struct spdk_nvme_transport_id *trid1,
+ const struct spdk_nvme_transport_id *trid2)
+{
+ return 0;
+}
+
+int32_t
+spdk_nvme_ctrlr_process_admin_completions(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return -1;
+}
+
+int32_t
+spdk_nvme_qpair_process_completions(struct spdk_nvme_qpair *qpair, uint32_t max_completions)
+{
+ return -1;
+}
+
+int
+spdk_nvme_detach(struct spdk_nvme_ctrlr *ctrlr)
+{
+ return -1;
+}
+
+void
+spdk_nvmf_ctrlr_destruct(struct spdk_nvmf_ctrlr *ctrlr)
+{
+}
+
+void
+spdk_nvmf_ctrlr_ns_changed(struct spdk_nvmf_ctrlr *ctrlr, uint32_t nsid)
+{
+}
+
+int
+spdk_bdev_open(struct spdk_bdev *bdev, bool write, spdk_bdev_remove_cb_t remove_cb,
+ void *remove_ctx, struct spdk_bdev_desc **desc)
+{
+ return 0;
+}
+
+void
+spdk_bdev_close(struct spdk_bdev_desc *desc)
+{
+}
+
+const char *
+spdk_bdev_get_name(const struct spdk_bdev *bdev)
+{
+ return "test";
+}
+
+const struct spdk_uuid *
+spdk_bdev_get_uuid(const struct spdk_bdev *bdev)
+{
+ return &bdev->uuid;
+}
+
+static void
+test_spdk_nvmf_subsystem_add_ns(void)
+{
+ struct spdk_nvmf_tgt tgt = {};
+ struct spdk_nvmf_subsystem subsystem = {
+ .max_nsid = 0,
+ .ns = NULL,
+ .tgt = &tgt
+ };
+ struct spdk_bdev bdev1 = {}, bdev2 = {};
+ struct spdk_nvmf_ns_opts ns_opts;
+ uint32_t nsid;
+
+ tgt.opts.max_subsystems = 1024;
+ tgt.subsystems = calloc(tgt.opts.max_subsystems, sizeof(struct spdk_nvmf_subsystem *));
+ SPDK_CU_ASSERT_FATAL(tgt.subsystems != NULL);
+
+ /* Allow NSID to be assigned automatically */
+ spdk_nvmf_ns_opts_get_defaults(&ns_opts, sizeof(ns_opts));
+ nsid = spdk_nvmf_subsystem_add_ns(&subsystem, &bdev1, &ns_opts, sizeof(ns_opts));
+ /* NSID 1 is the first unused ID */
+ CU_ASSERT(nsid == 1);
+ CU_ASSERT(subsystem.max_nsid == 1);
+ SPDK_CU_ASSERT_FATAL(subsystem.ns != NULL);
+ SPDK_CU_ASSERT_FATAL(subsystem.ns[nsid - 1] != NULL);
+ CU_ASSERT(subsystem.ns[nsid - 1]->bdev == &bdev1);
+
+ /* Request a specific NSID */
+ spdk_nvmf_ns_opts_get_defaults(&ns_opts, sizeof(ns_opts));
+ ns_opts.nsid = 5;
+ nsid = spdk_nvmf_subsystem_add_ns(&subsystem, &bdev2, &ns_opts, sizeof(ns_opts));
+ CU_ASSERT(nsid == 5);
+ CU_ASSERT(subsystem.max_nsid == 5);
+ SPDK_CU_ASSERT_FATAL(subsystem.ns[nsid - 1] != NULL);
+ CU_ASSERT(subsystem.ns[nsid - 1]->bdev == &bdev2);
+
+ /* Request an NSID that is already in use */
+ spdk_nvmf_ns_opts_get_defaults(&ns_opts, sizeof(ns_opts));
+ ns_opts.nsid = 5;
+ nsid = spdk_nvmf_subsystem_add_ns(&subsystem, &bdev2, &ns_opts, sizeof(ns_opts));
+ CU_ASSERT(nsid == 0);
+ CU_ASSERT(subsystem.max_nsid == 5);
+
+ /* Request 0xFFFFFFFF (invalid NSID, reserved for broadcast) */
+ spdk_nvmf_ns_opts_get_defaults(&ns_opts, sizeof(ns_opts));
+ ns_opts.nsid = 0xFFFFFFFF;
+ nsid = spdk_nvmf_subsystem_add_ns(&subsystem, &bdev2, &ns_opts, sizeof(ns_opts));
+ CU_ASSERT(nsid == 0);
+ CU_ASSERT(subsystem.max_nsid == 5);
+
+ spdk_nvmf_subsystem_remove_ns(&subsystem, 1, subsystem_ns_remove_cb, NULL);
+ spdk_nvmf_subsystem_remove_ns(&subsystem, 5, subsystem_ns_remove_cb, NULL);
+
+ free(subsystem.ns);
+ free(tgt.subsystems);
+}
+
+static void
+nvmf_test_create_subsystem(void)
+{
+ struct spdk_nvmf_tgt tgt = {};
+ char nqn[256];
+ struct spdk_nvmf_subsystem *subsystem;
+
+ tgt.opts.max_subsystems = 1024;
+ tgt.subsystems = calloc(tgt.opts.max_subsystems, sizeof(struct spdk_nvmf_subsystem *));
+ SPDK_CU_ASSERT_FATAL(tgt.subsystems != NULL);
+
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:subsystem1");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem != NULL);
+ CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
+ spdk_nvmf_subsystem_destroy(subsystem);
+
+ /* valid name with complex reverse domain */
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk-full--rev-domain.name:subsystem1");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem != NULL);
+ CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
+ spdk_nvmf_subsystem_destroy(subsystem);
+
+ /* Valid name discovery controller */
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:subsystem1");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem != NULL);
+ CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
+ spdk_nvmf_subsystem_destroy(subsystem);
+
+
+ /* Invalid name, no user supplied string */
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem == NULL);
+
+ /* Valid name, only contains top-level domain name */
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:subsystem1");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem != NULL);
+ CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
+ spdk_nvmf_subsystem_destroy(subsystem);
+
+ /* Invalid name, domain label > 63 characters */
+ snprintf(nqn, sizeof(nqn),
+ "nqn.2016-06.io.abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz:sub");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem == NULL);
+
+ /* Invalid name, domain label starts with digit */
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.3spdk:sub");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem == NULL);
+
+ /* Invalid name, domain label starts with - */
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.-spdk:subsystem1");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem == NULL);
+
+ /* Invalid name, domain label ends with - */
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk-:subsystem1");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem == NULL);
+
+ /* Invalid name, domain label with multiple consecutive periods */
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io..spdk:subsystem1");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem == NULL);
+
+ /* Longest valid name */
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:");
+ memset(nqn + strlen(nqn), 'a', 223 - strlen(nqn));
+ nqn[223] = '\0';
+ CU_ASSERT(strlen(nqn) == 223);
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem != NULL);
+ CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
+ spdk_nvmf_subsystem_destroy(subsystem);
+
+ /* Invalid name, too long */
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:");
+ memset(nqn + strlen(nqn), 'a', 224 - strlen(nqn));
+ nqn[224] = '\0';
+ CU_ASSERT(strlen(nqn) == 224);
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ CU_ASSERT(subsystem == NULL);
+
+ /* Valid name using uuid format */
+ snprintf(nqn, sizeof(nqn), "nqn.2014-08.org.nvmexpress:uuid:11111111-aaaa-bbdd-FFEE-123456789abc");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem != NULL);
+ CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
+ spdk_nvmf_subsystem_destroy(subsystem);
+
+ /* Invalid name user string contains an invalid utf-8 character */
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:\xFFsubsystem1");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem == NULL);
+
+ /* Valid name with non-ascii but valid utf-8 characters */
+ snprintf(nqn, sizeof(nqn), "nqn.2016-06.io.spdk:\xe1\x8a\x88subsystem1\xca\x80");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem != NULL);
+ CU_ASSERT_STRING_EQUAL(subsystem->subnqn, nqn);
+ spdk_nvmf_subsystem_destroy(subsystem);
+
+ /* Invalid uuid (too long) */
+ snprintf(nqn, sizeof(nqn),
+ "nqn.2014-08.org.nvmexpress:uuid:11111111-aaaa-bbdd-FFEE-123456789abcdef");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem == NULL);
+
+ /* Invalid uuid (dashes placed incorrectly) */
+ snprintf(nqn, sizeof(nqn), "nqn.2014-08.org.nvmexpress:uuid:111111-11aaaa-bbdd-FFEE-123456789abc");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem == NULL);
+
+ /* Invalid uuid (invalid characters in uuid) */
+ snprintf(nqn, sizeof(nqn), "nqn.2014-08.org.nvmexpress:uuid:111hg111-aaaa-bbdd-FFEE-123456789abc");
+ subsystem = spdk_nvmf_subsystem_create(&tgt, nqn, SPDK_NVMF_SUBTYPE_NVME, 0);
+ SPDK_CU_ASSERT_FATAL(subsystem == NULL);
+
+ free(tgt.subsystems);
+}
+
+static void
+test_spdk_nvmf_subsystem_set_sn(void)
+{
+ struct spdk_nvmf_subsystem subsystem = {};
+
+ /* Basic valid serial number */
+ CU_ASSERT(spdk_nvmf_subsystem_set_sn(&subsystem, "abcd xyz") == 0);
+ CU_ASSERT(strcmp(subsystem.sn, "abcd xyz") == 0);
+
+ /* Exactly 20 characters (valid) */
+ CU_ASSERT(spdk_nvmf_subsystem_set_sn(&subsystem, "12345678901234567890") == 0);
+ CU_ASSERT(strcmp(subsystem.sn, "12345678901234567890") == 0);
+
+ /* 21 characters (too long, invalid) */
+ CU_ASSERT(spdk_nvmf_subsystem_set_sn(&subsystem, "123456789012345678901") < 0);
+
+ /* Non-ASCII characters (invalid) */
+ CU_ASSERT(spdk_nvmf_subsystem_set_sn(&subsystem, "abcd\txyz") < 0);
+}
+
+int main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("nvmf", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "create_subsystem", nvmf_test_create_subsystem) == NULL ||
+ CU_add_test(suite, "nvmf_subsystem_add_ns", test_spdk_nvmf_subsystem_add_ns) == NULL ||
+ CU_add_test(suite, "nvmf_subsystem_set_sn", test_spdk_nvmf_subsystem_set_sn) == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ spdk_allocate_thread(_subsystem_send_msg, NULL, NULL, NULL, "thread0");
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ spdk_free_thread();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/scsi/Makefile b/src/spdk/test/unit/lib/scsi/Makefile
new file mode 100644
index 00000000..9e413897
--- /dev/null
+++ b/src/spdk/test/unit/lib/scsi/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = dev.c lun.c scsi.c scsi_bdev.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/scsi/dev.c/.gitignore b/src/spdk/test/unit/lib/scsi/dev.c/.gitignore
new file mode 100644
index 00000000..e325086b
--- /dev/null
+++ b/src/spdk/test/unit/lib/scsi/dev.c/.gitignore
@@ -0,0 +1 @@
+dev_ut
diff --git a/src/spdk/test/unit/lib/scsi/dev.c/Makefile b/src/spdk/test/unit/lib/scsi/dev.c/Makefile
new file mode 100644
index 00000000..4e7a5fa9
--- /dev/null
+++ b/src/spdk/test/unit/lib/scsi/dev.c/Makefile
@@ -0,0 +1,40 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+
+TEST_FILE = dev_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/scsi/dev.c/dev_ut.c b/src/spdk/test/unit/lib/scsi/dev.c/dev_ut.c
new file mode 100644
index 00000000..c10a7f0a
--- /dev/null
+++ b/src/spdk/test/unit/lib/scsi/dev.c/dev_ut.c
@@ -0,0 +1,681 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "CUnit/Basic.h"
+#include "spdk_cunit.h"
+
+#include "spdk/util.h"
+
+#include "scsi/dev.c"
+#include "scsi/port.c"
+
+/* Unit test bdev mockup */
+struct spdk_bdev {
+ char name[100];
+};
+
+static struct spdk_bdev g_bdevs[] = {
+ {"malloc0"},
+ {"malloc1"},
+};
+
+const char *
+spdk_bdev_get_name(const struct spdk_bdev *bdev)
+{
+ return bdev->name;
+}
+
+static struct spdk_scsi_task *
+spdk_get_task(uint32_t *owner_task_ctr)
+{
+ struct spdk_scsi_task *task;
+
+ task = calloc(1, sizeof(*task));
+ if (!task) {
+ return NULL;
+ }
+
+ return task;
+}
+
+void
+spdk_scsi_task_put(struct spdk_scsi_task *task)
+{
+ free(task);
+}
+
+_spdk_scsi_lun *
+spdk_scsi_lun_construct(struct spdk_bdev *bdev,
+ void (*hotremove_cb)(const struct spdk_scsi_lun *, void *),
+ void *hotremove_ctx)
+{
+ struct spdk_scsi_lun *lun;
+
+ lun = calloc(1, sizeof(struct spdk_scsi_lun));
+ SPDK_CU_ASSERT_FATAL(lun != NULL);
+
+ lun->bdev = bdev;
+
+ return lun;
+}
+
+void
+spdk_scsi_lun_destruct(struct spdk_scsi_lun *lun)
+{
+ free(lun);
+}
+
+struct spdk_bdev *
+spdk_bdev_get_by_name(const char *bdev_name)
+{
+ size_t i;
+
+ for (i = 0; i < SPDK_COUNTOF(g_bdevs); i++) {
+ if (strcmp(bdev_name, g_bdevs[i].name) == 0) {
+ return &g_bdevs[i];
+ }
+ }
+
+ return NULL;
+}
+
+int
+spdk_scsi_lun_task_mgmt_execute(struct spdk_scsi_task *task, enum spdk_scsi_task_func func)
+{
+ return 0;
+}
+
+void
+spdk_scsi_lun_execute_task(struct spdk_scsi_lun *lun, struct spdk_scsi_task *task)
+{
+}
+
+int
+_spdk_scsi_lun_allocate_io_channel(struct spdk_scsi_lun *lun)
+{
+ return 0;
+}
+
+void
+_spdk_scsi_lun_free_io_channel(struct spdk_scsi_lun *lun)
+{
+}
+
+bool
+spdk_scsi_lun_has_pending_tasks(const struct spdk_scsi_lun *lun)
+{
+ return false;
+}
+
+static void
+dev_destruct_null_dev(void)
+{
+ /* pass null for the dev */
+ spdk_scsi_dev_destruct(NULL);
+}
+
+static void
+dev_destruct_zero_luns(void)
+{
+ struct spdk_scsi_dev dev = { .is_allocated = 1 };
+
+ /* No luns attached to the dev */
+
+ /* free the dev */
+ spdk_scsi_dev_destruct(&dev);
+}
+
+static void
+dev_destruct_null_lun(void)
+{
+ struct spdk_scsi_dev dev = { .is_allocated = 1 };
+
+ /* pass null for the lun */
+ dev.lun[0] = NULL;
+
+ /* free the dev */
+ spdk_scsi_dev_destruct(&dev);
+}
+
+static void
+dev_destruct_success(void)
+{
+ struct spdk_scsi_dev dev = { .is_allocated = 1 };
+ int rc;
+
+ /* dev with a single lun */
+ rc = spdk_scsi_dev_add_lun(&dev, "malloc0", 0, NULL, NULL);
+
+ CU_ASSERT(rc == 0);
+
+ /* free the dev */
+ spdk_scsi_dev_destruct(&dev);
+
+}
+
+static void
+dev_construct_num_luns_zero(void)
+{
+ struct spdk_scsi_dev *dev;
+ const char *bdev_name_list[1] = {};
+ int lun_id_list[1] = { 0 };
+
+ dev = spdk_scsi_dev_construct("Name", bdev_name_list, lun_id_list, 0,
+ SPDK_SPC_PROTOCOL_IDENTIFIER_ISCSI, NULL, NULL);
+
+ /* dev should be null since we passed num_luns = 0 */
+ CU_ASSERT_TRUE(dev == NULL);
+}
+
+static void
+dev_construct_no_lun_zero(void)
+{
+ struct spdk_scsi_dev *dev;
+ const char *bdev_name_list[1] = {};
+ int lun_id_list[1] = { 0 };
+
+ lun_id_list[0] = 1;
+
+ dev = spdk_scsi_dev_construct("Name", bdev_name_list, lun_id_list, 1,
+ SPDK_SPC_PROTOCOL_IDENTIFIER_ISCSI, NULL, NULL);
+
+ /* dev should be null since no LUN0 was specified (lun_id_list[0] = 1) */
+ CU_ASSERT_TRUE(dev == NULL);
+}
+
+static void
+dev_construct_null_lun(void)
+{
+ struct spdk_scsi_dev *dev;
+ const char *bdev_name_list[1] = {};
+ int lun_id_list[1] = { 0 };
+
+ dev = spdk_scsi_dev_construct("Name", bdev_name_list, lun_id_list, 1,
+ SPDK_SPC_PROTOCOL_IDENTIFIER_ISCSI, NULL, NULL);
+
+ /* dev should be null since no LUN0 was specified (lun_list[0] = NULL) */
+ CU_ASSERT_TRUE(dev == NULL);
+}
+
+static void
+dev_construct_name_too_long(void)
+{
+ struct spdk_scsi_dev *dev;
+ const char *bdev_name_list[1] = {"malloc0"};
+ int lun_id_list[1] = { 0 };
+ char name[SPDK_SCSI_DEV_MAX_NAME + 1 + 1];
+
+ /* Try to construct a dev with a name that is one byte longer than allowed. */
+ memset(name, 'x', sizeof(name) - 1);
+ name[sizeof(name) - 1] = '\0';
+
+ dev = spdk_scsi_dev_construct(name, bdev_name_list, lun_id_list, 1,
+ SPDK_SPC_PROTOCOL_IDENTIFIER_ISCSI, NULL, NULL);
+
+ CU_ASSERT(dev == NULL);
+}
+
+static void
+dev_construct_success(void)
+{
+ struct spdk_scsi_dev *dev;
+ const char *bdev_name_list[1] = {"malloc0"};
+ int lun_id_list[1] = { 0 };
+
+ dev = spdk_scsi_dev_construct("Name", bdev_name_list, lun_id_list, 1,
+ SPDK_SPC_PROTOCOL_IDENTIFIER_ISCSI, NULL, NULL);
+
+ /* Successfully constructs and returns a dev */
+ CU_ASSERT_TRUE(dev != NULL);
+
+ /* free the dev */
+ spdk_scsi_dev_destruct(dev);
+}
+
+static void
+dev_construct_success_lun_zero_not_first(void)
+{
+ struct spdk_scsi_dev *dev;
+ const char *bdev_name_list[2] = {"malloc1", "malloc0"};
+ int lun_id_list[2] = { 1, 0 };
+
+ dev = spdk_scsi_dev_construct("Name", bdev_name_list, lun_id_list, 2,
+ SPDK_SPC_PROTOCOL_IDENTIFIER_ISCSI, NULL, NULL);
+
+ /* Successfully constructs and returns a dev */
+ CU_ASSERT_TRUE(dev != NULL);
+
+ /* free the dev */
+ spdk_scsi_dev_destruct(dev);
+}
+
+static void
+dev_queue_mgmt_task_success(void)
+{
+ struct spdk_scsi_dev *dev;
+ const char *bdev_name_list[1] = {"malloc0"};
+ int lun_id_list[1] = { 0 };
+ struct spdk_scsi_task *task;
+
+ dev = spdk_scsi_dev_construct("Name", bdev_name_list, lun_id_list, 1,
+ SPDK_SPC_PROTOCOL_IDENTIFIER_ISCSI, NULL, NULL);
+
+ /* Successfully constructs and returns a dev */
+ CU_ASSERT_TRUE(dev != NULL);
+
+ task = spdk_get_task(NULL);
+
+ spdk_scsi_dev_queue_mgmt_task(dev, task, SPDK_SCSI_TASK_FUNC_LUN_RESET);
+
+ spdk_scsi_task_put(task);
+
+ spdk_scsi_dev_destruct(dev);
+}
+
+static void
+dev_queue_task_success(void)
+{
+ struct spdk_scsi_dev *dev;
+ const char *bdev_name_list[1] = {"malloc0"};
+ int lun_id_list[1] = { 0 };
+ struct spdk_scsi_task *task;
+
+ dev = spdk_scsi_dev_construct("Name", bdev_name_list, lun_id_list, 1,
+ SPDK_SPC_PROTOCOL_IDENTIFIER_ISCSI, NULL, NULL);
+
+ /* Successfully constructs and returns a dev */
+ CU_ASSERT_TRUE(dev != NULL);
+
+ task = spdk_get_task(NULL);
+
+ spdk_scsi_dev_queue_task(dev, task);
+
+ spdk_scsi_task_put(task);
+
+ spdk_scsi_dev_destruct(dev);
+}
+
+static void
+dev_stop_success(void)
+{
+ struct spdk_scsi_dev dev = { 0 };
+ struct spdk_scsi_task *task;
+ struct spdk_scsi_task *task_mgmt;
+
+ task = spdk_get_task(NULL);
+
+ spdk_scsi_dev_queue_task(&dev, task);
+
+ task_mgmt = spdk_get_task(NULL);
+
+ /* Enqueue the tasks into dev->task_mgmt_submit_queue */
+ spdk_scsi_dev_queue_mgmt_task(&dev, task_mgmt, SPDK_SCSI_TASK_FUNC_LUN_RESET);
+
+ spdk_scsi_task_put(task);
+ spdk_scsi_task_put(task_mgmt);
+}
+
+static void
+dev_add_port_max_ports(void)
+{
+ struct spdk_scsi_dev dev = { 0 };
+ const char *name;
+ int id, rc;
+
+ /* dev is set to SPDK_SCSI_DEV_MAX_PORTS */
+ dev.num_ports = SPDK_SCSI_DEV_MAX_PORTS;
+ name = "Name of Port";
+ id = 1;
+
+ rc = spdk_scsi_dev_add_port(&dev, id, name);
+
+ /* returns -1; since the dev already has maximum
+ * number of ports (SPDK_SCSI_DEV_MAX_PORTS) */
+ CU_ASSERT_TRUE(rc < 0);
+}
+
+static void
+dev_add_port_construct_failure1(void)
+{
+ struct spdk_scsi_dev dev = { 0 };
+ const int port_name_length = SPDK_SCSI_PORT_MAX_NAME_LENGTH + 2;
+ char name[port_name_length];
+ uint64_t id;
+ int rc;
+
+ dev.num_ports = 1;
+ /* Set the name such that the length exceeds SPDK_SCSI_PORT_MAX_NAME_LENGTH
+ * SPDK_SCSI_PORT_MAX_NAME_LENGTH = 256 */
+ memset(name, 'a', port_name_length - 1);
+ name[port_name_length - 1] = '\0';
+ id = 1;
+
+ rc = spdk_scsi_dev_add_port(&dev, id, name);
+
+ /* returns -1; since the length of the name exceeds
+ * SPDK_SCSI_PORT_MAX_NAME_LENGTH */
+ CU_ASSERT_TRUE(rc < 0);
+}
+
+static void
+dev_add_port_construct_failure2(void)
+{
+ struct spdk_scsi_dev dev = { 0 };
+ const char *name;
+ uint64_t id;
+ int rc;
+
+ dev.num_ports = 1;
+ name = "Name of Port";
+ id = 1;
+
+ /* Initialize port[0] to be valid and its index is set to 1 */
+ dev.port[0].id = id;
+ dev.port[0].is_used = 1;
+
+ rc = spdk_scsi_dev_add_port(&dev, id, name);
+
+ /* returns -1; since the dev already has a port whose index to be 1 */
+ CU_ASSERT_TRUE(rc < 0);
+}
+
+static void
+dev_add_port_success1(void)
+{
+ struct spdk_scsi_dev dev = { 0 };
+ const char *name;
+ int id, rc;
+
+ dev.num_ports = 1;
+ name = "Name of Port";
+ id = 1;
+
+ rc = spdk_scsi_dev_add_port(&dev, id, name);
+
+ /* successfully adds a port */
+ CU_ASSERT_EQUAL(rc, 0);
+ /* Assert num_ports has been incremented to 2 */
+ CU_ASSERT_EQUAL(dev.num_ports, 2);
+}
+
+static void
+dev_add_port_success2(void)
+{
+ struct spdk_scsi_dev dev = { 0 };
+ const char *name;
+ uint64_t id;
+ int rc;
+
+ dev.num_ports = 1;
+ name = "Name of Port";
+ id = 1;
+ /* set id of invalid port[0] to 1. This must be ignored */
+ dev.port[0].id = id;
+ dev.port[0].is_used = 0;
+
+ rc = spdk_scsi_dev_add_port(&dev, id, name);
+
+ /* successfully adds a port */
+ CU_ASSERT_EQUAL(rc, 0);
+ /* Assert num_ports has been incremented to 1 */
+ CU_ASSERT_EQUAL(dev.num_ports, 2);
+}
+
+static void
+dev_add_port_success3(void)
+{
+ struct spdk_scsi_dev dev = { 0 };
+ const char *name;
+ uint64_t add_id;
+ int rc;
+
+ dev.num_ports = 1;
+ name = "Name of Port";
+ dev.port[0].id = 1;
+ dev.port[0].is_used = 1;
+ add_id = 2;
+
+ /* Add a port with id = 2 */
+ rc = spdk_scsi_dev_add_port(&dev, add_id, name);
+
+ /* successfully adds a port */
+ CU_ASSERT_EQUAL(rc, 0);
+ /* Assert num_ports has been incremented to 2 */
+ CU_ASSERT_EQUAL(dev.num_ports, 2);
+}
+
+static void
+dev_find_port_by_id_num_ports_zero(void)
+{
+ struct spdk_scsi_dev dev = { 0 };
+ struct spdk_scsi_port *rp_port;
+ uint64_t id;
+
+ dev.num_ports = 0;
+ id = 1;
+
+ rp_port = spdk_scsi_dev_find_port_by_id(&dev, id);
+
+ /* returns null; since dev's num_ports is 0 */
+ CU_ASSERT_TRUE(rp_port == NULL);
+}
+
+static void
+dev_find_port_by_id_id_not_found_failure(void)
+{
+ struct spdk_scsi_dev dev = { 0 };
+ struct spdk_scsi_port *rp_port;
+ const char *name;
+ int rc;
+ uint64_t id, find_id;
+
+ id = 1;
+ dev.num_ports = 1;
+ name = "Name of Port";
+ find_id = 2;
+
+ /* Add a port with id = 1 */
+ rc = spdk_scsi_dev_add_port(&dev, id, name);
+
+ CU_ASSERT_EQUAL(rc, 0);
+
+ /* Find port with id = 2 */
+ rp_port = spdk_scsi_dev_find_port_by_id(&dev, find_id);
+
+ /* returns null; failed to find port specified by id = 2 */
+ CU_ASSERT_TRUE(rp_port == NULL);
+}
+
+static void
+dev_find_port_by_id_success(void)
+{
+ struct spdk_scsi_dev dev = { 0 };
+ struct spdk_scsi_port *rp_port;
+ const char *name;
+ int rc;
+ uint64_t id;
+
+ id = 1;
+ dev.num_ports = 1;
+ name = "Name of Port";
+
+ /* Add a port */
+ rc = spdk_scsi_dev_add_port(&dev, id, name);
+
+ CU_ASSERT_EQUAL(rc, 0);
+
+ /* Find port by the same id as the one added above */
+ rp_port = spdk_scsi_dev_find_port_by_id(&dev, id);
+
+ /* Successfully found port specified by id */
+ CU_ASSERT_TRUE(rp_port != NULL);
+ if (rp_port != NULL) {
+ /* Assert the found port's id and name are same as
+ * the port added. */
+ CU_ASSERT_EQUAL(rp_port->id, 1);
+ CU_ASSERT_STRING_EQUAL(rp_port->name, "Name of Port");
+ }
+}
+
+static void
+dev_add_lun_bdev_not_found(void)
+{
+ int rc;
+ struct spdk_scsi_dev dev = {0};
+
+ rc = spdk_scsi_dev_add_lun(&dev, "malloc2", 0, NULL, NULL);
+
+ SPDK_CU_ASSERT_FATAL(dev.lun[0] == NULL);
+ CU_ASSERT_NOT_EQUAL(rc, 0);
+}
+
+static void
+dev_add_lun_no_free_lun_id(void)
+{
+ int rc;
+ int i;
+ struct spdk_scsi_dev dev = {0};
+ struct spdk_scsi_lun lun;
+
+ for (i = 0; i < SPDK_SCSI_DEV_MAX_LUN; i++) {
+ dev.lun[i] = &lun;
+ }
+
+ rc = spdk_scsi_dev_add_lun(&dev, "malloc0", -1, NULL, NULL);
+
+ CU_ASSERT_NOT_EQUAL(rc, 0);
+}
+
+static void
+dev_add_lun_success1(void)
+{
+ int rc;
+ struct spdk_scsi_dev dev = {0};
+
+ rc = spdk_scsi_dev_add_lun(&dev, "malloc0", -1, NULL, NULL);
+
+ CU_ASSERT_EQUAL(rc, 0);
+
+ spdk_scsi_dev_destruct(&dev);
+}
+
+static void
+dev_add_lun_success2(void)
+{
+ int rc;
+ struct spdk_scsi_dev dev = {0};
+
+ rc = spdk_scsi_dev_add_lun(&dev, "malloc0", 0, NULL, NULL);
+
+ CU_ASSERT_EQUAL(rc, 0);
+
+ spdk_scsi_dev_destruct(&dev);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("dev_suite", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "destruct - null dev",
+ dev_destruct_null_dev) == NULL
+ || CU_add_test(suite, "destruct - zero luns", dev_destruct_zero_luns) == NULL
+ || CU_add_test(suite, "destruct - null lun", dev_destruct_null_lun) == NULL
+ || CU_add_test(suite, "destruct - success", dev_destruct_success) == NULL
+ || CU_add_test(suite, "construct - queue depth gt max depth",
+ dev_construct_num_luns_zero) == NULL
+ || CU_add_test(suite, "construct - no lun0",
+ dev_construct_no_lun_zero) == NULL
+ || CU_add_test(suite, "construct - null lun",
+ dev_construct_null_lun) == NULL
+ || CU_add_test(suite, "construct - name too long", dev_construct_name_too_long) == NULL
+ || CU_add_test(suite, "construct - success", dev_construct_success) == NULL
+ || CU_add_test(suite, "construct - success - LUN zero not first",
+ dev_construct_success_lun_zero_not_first) == NULL
+ || CU_add_test(suite, "dev queue task mgmt - success",
+ dev_queue_mgmt_task_success) == NULL
+ || CU_add_test(suite, "dev queue task - success",
+ dev_queue_task_success) == NULL
+ || CU_add_test(suite, "dev stop - success", dev_stop_success) == NULL
+ || CU_add_test(suite, "dev add port - max ports",
+ dev_add_port_max_ports) == NULL
+ || CU_add_test(suite, "dev add port - construct port failure 1",
+ dev_add_port_construct_failure1) == NULL
+ || CU_add_test(suite, "dev add port - construct port failure 2",
+ dev_add_port_construct_failure2) == NULL
+ || CU_add_test(suite, "dev add port - success 1",
+ dev_add_port_success1) == NULL
+ || CU_add_test(suite, "dev add port - success 2",
+ dev_add_port_success2) == NULL
+ || CU_add_test(suite, "dev add port - success 3",
+ dev_add_port_success3) == NULL
+ || CU_add_test(suite, "dev find port by id - num ports zero",
+ dev_find_port_by_id_num_ports_zero) == NULL
+ || CU_add_test(suite, "dev find port by id - different port id failure",
+ dev_find_port_by_id_id_not_found_failure) == NULL
+ || CU_add_test(suite, "dev find port by id - success",
+ dev_find_port_by_id_success) == NULL
+ || CU_add_test(suite, "dev add lun - bdev not found",
+ dev_add_lun_bdev_not_found) == NULL
+ || CU_add_test(suite, "dev add lun - no free lun id",
+ dev_add_lun_no_free_lun_id) == NULL
+ || CU_add_test(suite, "dev add lun - success 1",
+ dev_add_lun_success1) == NULL
+ || CU_add_test(suite, "dev add lun - success 2",
+ dev_add_lun_success2) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/scsi/lun.c/.gitignore b/src/spdk/test/unit/lib/scsi/lun.c/.gitignore
new file mode 100644
index 00000000..89bd2aaf
--- /dev/null
+++ b/src/spdk/test/unit/lib/scsi/lun.c/.gitignore
@@ -0,0 +1 @@
+lun_ut
diff --git a/src/spdk/test/unit/lib/scsi/lun.c/Makefile b/src/spdk/test/unit/lib/scsi/lun.c/Makefile
new file mode 100644
index 00000000..22841b0d
--- /dev/null
+++ b/src/spdk/test/unit/lib/scsi/lun.c/Makefile
@@ -0,0 +1,40 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+
+TEST_FILE = lun_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/scsi/lun.c/lun_ut.c b/src/spdk/test/unit/lib/scsi/lun.c/lun_ut.c
new file mode 100644
index 00000000..2237e8ed
--- /dev/null
+++ b/src/spdk/test/unit/lib/scsi/lun.c/lun_ut.c
@@ -0,0 +1,654 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "scsi/task.c"
+#include "scsi/lun.c"
+
+#include "spdk_internal/mock.h"
+
+/* Unit test bdev mockup */
+struct spdk_bdev {
+ int x;
+};
+
+SPDK_LOG_REGISTER_COMPONENT("scsi", SPDK_LOG_SCSI)
+
+struct spdk_scsi_globals g_spdk_scsi;
+
+static bool g_lun_execute_fail = false;
+static int g_lun_execute_status = SPDK_SCSI_TASK_PENDING;
+static uint32_t g_task_count = 0;
+
+struct spdk_poller *
+spdk_poller_register(spdk_poller_fn fn,
+ void *arg,
+ uint64_t period_microseconds)
+{
+ return NULL;
+}
+
+void
+spdk_poller_unregister(struct spdk_poller **ppoller)
+{
+}
+
+void
+spdk_thread_send_msg(const struct spdk_thread *thread, spdk_thread_fn fn, void *ctx)
+{
+}
+
+struct spdk_trace_histories *g_trace_histories;
+void _spdk_trace_record(uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id,
+ uint32_t size, uint64_t object_id, uint64_t arg1)
+{
+}
+
+static void
+spdk_lun_ut_cpl_task(struct spdk_scsi_task *task)
+{
+ SPDK_CU_ASSERT_FATAL(g_task_count > 0);
+ g_task_count--;
+}
+
+static void
+spdk_lun_ut_free_task(struct spdk_scsi_task *task)
+{
+}
+
+static void
+ut_init_task(struct spdk_scsi_task *task)
+{
+ memset(task, 0, sizeof(*task));
+ spdk_scsi_task_construct(task, spdk_lun_ut_cpl_task,
+ spdk_lun_ut_free_task);
+ g_task_count++;
+}
+
+void *
+spdk_dma_malloc(size_t size, size_t align, uint64_t *phys_addr)
+{
+ void *buf = malloc(size);
+ if (phys_addr) {
+ *phys_addr = (uint64_t)buf;
+ }
+ return buf;
+}
+
+void *
+spdk_dma_zmalloc(size_t size, size_t align, uint64_t *phys_addr)
+{
+ void *buf = calloc(size, 1);
+ if (phys_addr) {
+ *phys_addr = (uint64_t)buf;
+ }
+ return buf;
+}
+
+void
+spdk_dma_free(void *buf)
+{
+ free(buf);
+}
+
+void
+spdk_bdev_free_io(struct spdk_bdev_io *bdev_io)
+{
+ CU_ASSERT(0);
+}
+
+int
+spdk_bdev_open(struct spdk_bdev *bdev, bool write, spdk_bdev_remove_cb_t remove_cb,
+ void *remove_ctx, struct spdk_bdev_desc **desc)
+{
+ return 0;
+}
+
+void
+spdk_bdev_close(struct spdk_bdev_desc *desc)
+{
+}
+
+const char *
+spdk_bdev_get_name(const struct spdk_bdev *bdev)
+{
+ return "test";
+}
+
+void spdk_scsi_dev_queue_mgmt_task(struct spdk_scsi_dev *dev,
+ struct spdk_scsi_task *task,
+ enum spdk_scsi_task_func func)
+{
+}
+
+void spdk_scsi_dev_delete_lun(struct spdk_scsi_dev *dev,
+ struct spdk_scsi_lun *lun)
+{
+ return;
+}
+
+void
+spdk_bdev_scsi_reset(struct spdk_scsi_task *task)
+{
+ return;
+}
+
+int
+spdk_bdev_scsi_execute(struct spdk_scsi_task *task)
+{
+ if (g_lun_execute_fail) {
+ return -EINVAL;
+ } else {
+ task->status = SPDK_SCSI_STATUS_GOOD;
+
+ if (g_lun_execute_status == SPDK_SCSI_TASK_PENDING) {
+ return g_lun_execute_status;
+ } else if (g_lun_execute_status == SPDK_SCSI_TASK_COMPLETE) {
+ return g_lun_execute_status;
+ } else {
+ return 0;
+ }
+ }
+}
+
+struct spdk_io_channel *
+spdk_bdev_get_io_channel(struct spdk_bdev_desc *desc)
+{
+ return NULL;
+}
+
+void
+spdk_put_io_channel(struct spdk_io_channel *ch)
+{
+}
+
+DEFINE_STUB(spdk_io_channel_get_thread, struct spdk_thread *, (struct spdk_io_channel *ch), NULL)
+DEFINE_STUB(spdk_get_thread, struct spdk_thread *, (void), NULL)
+
+static _spdk_scsi_lun *
+lun_construct(void)
+{
+ struct spdk_scsi_lun *lun;
+ struct spdk_bdev bdev;
+
+ lun = spdk_scsi_lun_construct(&bdev, NULL, NULL);
+
+ SPDK_CU_ASSERT_FATAL(lun != NULL);
+ return lun;
+}
+
+static void
+lun_destruct(struct spdk_scsi_lun *lun)
+{
+ /* LUN will defer its removal if there are any unfinished tasks */
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&lun->tasks));
+
+ spdk_scsi_lun_destruct(lun);
+}
+
+static void
+lun_task_mgmt_execute_null_task(void)
+{
+ int rc;
+
+ rc = spdk_scsi_lun_task_mgmt_execute(NULL, SPDK_SCSI_TASK_FUNC_ABORT_TASK);
+
+ /* returns -1 since we passed NULL for the task */
+ CU_ASSERT_TRUE(rc < 0);
+ CU_ASSERT_EQUAL(g_task_count, 0);
+}
+
+static void
+lun_task_mgmt_execute_abort_task_null_lun_failure(void)
+{
+ struct spdk_scsi_task mgmt_task = { 0 };
+ struct spdk_scsi_port initiator_port = { 0 };
+ int rc;
+
+ ut_init_task(&mgmt_task);
+ mgmt_task.lun = NULL;
+ mgmt_task.initiator_port = &initiator_port;
+
+ rc = spdk_scsi_lun_task_mgmt_execute(&mgmt_task, SPDK_SCSI_TASK_FUNC_ABORT_TASK);
+
+ /* returns -1 since we passed NULL for LUN */
+ CU_ASSERT_TRUE(rc < 0);
+ CU_ASSERT_EQUAL(g_task_count, 0);
+}
+
+static void
+lun_task_mgmt_execute_abort_task_not_supported(void)
+{
+ struct spdk_scsi_lun *lun;
+ struct spdk_scsi_task task = { 0 };
+ struct spdk_scsi_task mgmt_task = { 0 };
+ struct spdk_scsi_port initiator_port = { 0 };
+ struct spdk_scsi_dev dev = { 0 };
+ uint8_t cdb[6] = { 0 };
+ int rc;
+
+ lun = lun_construct();
+ lun->dev = &dev;
+
+ ut_init_task(&mgmt_task);
+ mgmt_task.lun = lun;
+ mgmt_task.initiator_port = &initiator_port;
+
+ /* Params to add regular task to the lun->tasks */
+ ut_init_task(&task);
+ task.lun = lun;
+ task.cdb = cdb;
+
+ spdk_scsi_lun_execute_task(lun, &task);
+
+ /* task should now be on the tasks list */
+ CU_ASSERT(!TAILQ_EMPTY(&lun->tasks));
+
+ rc = spdk_scsi_lun_task_mgmt_execute(&mgmt_task, SPDK_SCSI_TASK_FUNC_ABORT_TASK);
+
+ /* returns -1 since task abort is not supported */
+ CU_ASSERT_TRUE(rc < 0);
+ CU_ASSERT(mgmt_task.response == SPDK_SCSI_TASK_MGMT_RESP_REJECT_FUNC_NOT_SUPPORTED);
+
+ /* task is still on the tasks list */
+ CU_ASSERT_EQUAL(g_task_count, 1);
+
+ spdk_scsi_lun_complete_task(lun, &task);
+ CU_ASSERT_EQUAL(g_task_count, 0);
+
+ lun_destruct(lun);
+}
+
+static void
+lun_task_mgmt_execute_abort_task_all_null_lun_failure(void)
+{
+ struct spdk_scsi_task mgmt_task = { 0 };
+ struct spdk_scsi_port initiator_port = { 0 };
+ int rc;
+
+ ut_init_task(&mgmt_task);
+ mgmt_task.lun = NULL;
+ mgmt_task.initiator_port = &initiator_port;
+
+ rc = spdk_scsi_lun_task_mgmt_execute(&mgmt_task, SPDK_SCSI_TASK_FUNC_ABORT_TASK_SET);
+
+ /* Returns -1 since we passed NULL for lun */
+ CU_ASSERT_TRUE(rc < 0);
+
+ CU_ASSERT_EQUAL(g_task_count, 0);
+}
+
+static void
+lun_task_mgmt_execute_abort_task_all_not_supported(void)
+{
+ struct spdk_scsi_lun *lun;
+ struct spdk_scsi_task task = { 0 };
+ struct spdk_scsi_task mgmt_task = { 0 };
+ struct spdk_scsi_port initiator_port = { 0 };
+ struct spdk_scsi_dev dev = { 0 };
+ int rc;
+ uint8_t cdb[6] = { 0 };
+
+ lun = lun_construct();
+ lun->dev = &dev;
+
+ ut_init_task(&mgmt_task);
+ mgmt_task.lun = lun;
+ mgmt_task.initiator_port = &initiator_port;
+
+ /* Params to add regular task to the lun->tasks */
+ ut_init_task(&task);
+ task.initiator_port = &initiator_port;
+ task.lun = lun;
+ task.cdb = cdb;
+
+ spdk_scsi_lun_execute_task(lun, &task);
+
+ /* task should now be on the tasks list */
+ CU_ASSERT(!TAILQ_EMPTY(&lun->tasks));
+
+ rc = spdk_scsi_lun_task_mgmt_execute(&mgmt_task, SPDK_SCSI_TASK_FUNC_ABORT_TASK_SET);
+
+ /* returns -1 since task abort is not supported */
+ CU_ASSERT_TRUE(rc < 0);
+ CU_ASSERT(mgmt_task.response == SPDK_SCSI_TASK_MGMT_RESP_REJECT_FUNC_NOT_SUPPORTED);
+
+ /* task is still on the tasks list */
+ CU_ASSERT_EQUAL(g_task_count, 1);
+
+ spdk_scsi_lun_complete_task(lun, &task);
+
+ CU_ASSERT_EQUAL(g_task_count, 0);
+
+ lun_destruct(lun);
+}
+
+static void
+lun_task_mgmt_execute_lun_reset_failure(void)
+{
+ struct spdk_scsi_task mgmt_task = { 0 };
+ int rc;
+
+ ut_init_task(&mgmt_task);
+ mgmt_task.lun = NULL;
+
+ rc = spdk_scsi_lun_task_mgmt_execute(&mgmt_task, SPDK_SCSI_TASK_FUNC_LUN_RESET);
+
+ /* Returns -1 since we passed NULL for lun */
+ CU_ASSERT_TRUE(rc < 0);
+
+ CU_ASSERT_EQUAL(g_task_count, 0);
+}
+
+static void
+lun_task_mgmt_execute_lun_reset(void)
+{
+ struct spdk_scsi_lun *lun;
+ struct spdk_scsi_task mgmt_task = { 0 };
+ struct spdk_scsi_dev dev = { 0 };
+ int rc;
+
+ lun = lun_construct();
+ lun->dev = &dev;
+
+ ut_init_task(&mgmt_task);
+ mgmt_task.lun = lun;
+
+ rc = spdk_scsi_lun_task_mgmt_execute(&mgmt_task, SPDK_SCSI_TASK_FUNC_LUN_RESET);
+
+ /* Returns success */
+ CU_ASSERT_EQUAL(rc, 0);
+
+ lun_destruct(lun);
+
+ /* task is still on the tasks list */
+ CU_ASSERT_EQUAL(g_task_count, 1);
+ g_task_count = 0;
+}
+
+static void
+lun_task_mgmt_execute_invalid_case(void)
+{
+ struct spdk_scsi_lun *lun;
+ struct spdk_scsi_task mgmt_task = { 0 };
+ struct spdk_scsi_dev dev = { 0 };
+ int rc;
+
+ lun = lun_construct();
+ lun->dev = &dev;
+
+ ut_init_task(&mgmt_task);
+ /* Pass an invalid value to the switch statement */
+ rc = spdk_scsi_lun_task_mgmt_execute(&mgmt_task, 5);
+
+ /* Returns -1 on passing an invalid value to the switch case */
+ CU_ASSERT_TRUE(rc < 0);
+
+ lun_destruct(lun);
+
+ CU_ASSERT_EQUAL(g_task_count, 0);
+}
+
+static void
+lun_append_task_null_lun_task_cdb_spc_inquiry(void)
+{
+ struct spdk_scsi_task task = { 0 };
+ uint8_t cdb[6] = { 0 };
+
+ ut_init_task(&task);
+ task.cdb = cdb;
+ task.cdb[0] = SPDK_SPC_INQUIRY;
+ /* alloc_len >= 4096 */
+ task.cdb[3] = 0xFF;
+ task.cdb[4] = 0xFF;
+ task.lun = NULL;
+
+ spdk_scsi_task_process_null_lun(&task);
+
+ CU_ASSERT_EQUAL(task.status, SPDK_SCSI_STATUS_GOOD);
+
+ spdk_scsi_task_put(&task);
+
+ /* spdk_scsi_task_process_null_lun() does not call cpl_fn */
+ CU_ASSERT_EQUAL(g_task_count, 1);
+ g_task_count = 0;
+}
+
+static void
+lun_append_task_null_lun_alloc_len_lt_4096(void)
+{
+ struct spdk_scsi_task task = { 0 };
+ uint8_t cdb[6] = { 0 };
+
+ ut_init_task(&task);
+ task.cdb = cdb;
+ task.cdb[0] = SPDK_SPC_INQUIRY;
+ /* alloc_len < 4096 */
+ task.cdb[3] = 0;
+ task.cdb[4] = 0;
+ /* alloc_len is set to a minimal value of 4096
+ * Hence, buf of size 4096 is allocated */
+ spdk_scsi_task_process_null_lun(&task);
+
+ CU_ASSERT_EQUAL(task.status, SPDK_SCSI_STATUS_GOOD);
+
+ spdk_scsi_task_put(&task);
+
+ /* spdk_scsi_task_process_null_lun() does not call cpl_fn */
+ CU_ASSERT_EQUAL(g_task_count, 1);
+ g_task_count = 0;
+}
+
+static void
+lun_append_task_null_lun_not_supported(void)
+{
+ struct spdk_scsi_task task = { 0 };
+ uint8_t cdb[6] = { 0 };
+
+ ut_init_task(&task);
+ task.cdb = cdb;
+ task.lun = NULL;
+
+ spdk_scsi_task_process_null_lun(&task);
+
+ CU_ASSERT_EQUAL(task.status, SPDK_SCSI_STATUS_CHECK_CONDITION);
+ /* LUN not supported; task's data transferred should be 0 */
+ CU_ASSERT_EQUAL(task.data_transferred, 0);
+
+ /* spdk_scsi_task_process_null_lun() does not call cpl_fn */
+ CU_ASSERT_EQUAL(g_task_count, 1);
+ g_task_count = 0;
+}
+
+static void
+lun_execute_scsi_task_pending(void)
+{
+ struct spdk_scsi_lun *lun;
+ struct spdk_scsi_task task = { 0 };
+ struct spdk_scsi_dev dev = { 0 };
+
+ lun = lun_construct();
+
+ ut_init_task(&task);
+ task.lun = lun;
+ lun->dev = &dev;
+
+ g_lun_execute_fail = false;
+ g_lun_execute_status = SPDK_SCSI_TASK_PENDING;
+
+ /* the tasks list should still be empty since it has not been
+ executed yet
+ */
+ CU_ASSERT(TAILQ_EMPTY(&lun->tasks));
+
+ spdk_scsi_lun_execute_task(lun, &task);
+
+ /* Assert the task has been successfully added to the tasks queue */
+ CU_ASSERT(!TAILQ_EMPTY(&lun->tasks));
+
+ /* task is still on the tasks list */
+ CU_ASSERT_EQUAL(g_task_count, 1);
+
+ /* Need to complete task so LUN might be removed right now */
+ spdk_scsi_lun_complete_task(lun, &task);
+
+ CU_ASSERT_EQUAL(g_task_count, 0);
+
+ lun_destruct(lun);
+}
+
+static void
+lun_execute_scsi_task_complete(void)
+{
+ struct spdk_scsi_lun *lun;
+ struct spdk_scsi_task task = { 0 };
+ struct spdk_scsi_dev dev = { 0 };
+
+ lun = lun_construct();
+
+ ut_init_task(&task);
+ task.lun = lun;
+ lun->dev = &dev;
+
+ g_lun_execute_fail = false;
+ g_lun_execute_status = SPDK_SCSI_TASK_COMPLETE;
+
+ /* the tasks list should still be empty since it has not been
+ executed yet
+ */
+ CU_ASSERT(TAILQ_EMPTY(&lun->tasks));
+
+ spdk_scsi_lun_execute_task(lun, &task);
+
+ /* Assert the task has not been added to the tasks queue */
+ CU_ASSERT(TAILQ_EMPTY(&lun->tasks));
+
+ lun_destruct(lun);
+
+ CU_ASSERT_EQUAL(g_task_count, 0);
+}
+
+static void
+lun_destruct_success(void)
+{
+ struct spdk_scsi_lun *lun;
+
+ lun = lun_construct();
+
+ spdk_scsi_lun_destruct(lun);
+
+ CU_ASSERT_EQUAL(g_task_count, 0);
+}
+
+static void
+lun_construct_null_ctx(void)
+{
+ struct spdk_scsi_lun *lun;
+
+ lun = spdk_scsi_lun_construct(NULL, NULL, NULL);
+
+ /* lun should be NULL since we passed NULL for the ctx pointer. */
+ CU_ASSERT(lun == NULL);
+ CU_ASSERT_EQUAL(g_task_count, 0);
+}
+
+static void
+lun_construct_success(void)
+{
+ struct spdk_scsi_lun *lun = lun_construct();
+
+ lun_destruct(lun);
+
+ CU_ASSERT_EQUAL(g_task_count, 0);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("lun_suite", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "task management - null task failure",
+ lun_task_mgmt_execute_null_task) == NULL
+ || CU_add_test(suite, "task management abort task - null lun failure",
+ lun_task_mgmt_execute_abort_task_null_lun_failure) == NULL
+ || CU_add_test(suite, "task management abort task - not supported",
+ lun_task_mgmt_execute_abort_task_not_supported) == NULL
+ || CU_add_test(suite, "task management abort task set - null lun failure",
+ lun_task_mgmt_execute_abort_task_all_null_lun_failure) == NULL
+ || CU_add_test(suite, "task management abort task set - success",
+ lun_task_mgmt_execute_abort_task_all_not_supported) == NULL
+ || CU_add_test(suite, "task management - lun reset failure",
+ lun_task_mgmt_execute_lun_reset_failure) == NULL
+ || CU_add_test(suite, "task management - lun reset success",
+ lun_task_mgmt_execute_lun_reset) == NULL
+ || CU_add_test(suite, "task management - invalid option",
+ lun_task_mgmt_execute_invalid_case) == NULL
+ || CU_add_test(suite, "append task - null lun SPDK_SPC_INQUIRY",
+ lun_append_task_null_lun_task_cdb_spc_inquiry) == NULL
+ || CU_add_test(suite, "append task - allocated length less than 4096",
+ lun_append_task_null_lun_alloc_len_lt_4096) == NULL
+ || CU_add_test(suite, "append task - unsupported lun",
+ lun_append_task_null_lun_not_supported) == NULL
+ || CU_add_test(suite, "execute task - scsi task pending",
+ lun_execute_scsi_task_pending) == NULL
+ || CU_add_test(suite, "execute task - scsi task complete",
+ lun_execute_scsi_task_complete) == NULL
+ || CU_add_test(suite, "destruct task - success", lun_destruct_success) == NULL
+ || CU_add_test(suite, "construct - null ctx", lun_construct_null_ctx) == NULL
+ || CU_add_test(suite, "construct - success", lun_construct_success) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/scsi/scsi.c/.gitignore b/src/spdk/test/unit/lib/scsi/scsi.c/.gitignore
new file mode 100644
index 00000000..99a7db2b
--- /dev/null
+++ b/src/spdk/test/unit/lib/scsi/scsi.c/.gitignore
@@ -0,0 +1 @@
+scsi_ut
diff --git a/src/spdk/test/unit/lib/scsi/scsi.c/Makefile b/src/spdk/test/unit/lib/scsi/scsi.c/Makefile
new file mode 100644
index 00000000..86893653
--- /dev/null
+++ b/src/spdk/test/unit/lib/scsi/scsi.c/Makefile
@@ -0,0 +1,41 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+
+SPDK_LIB_LIST = trace
+TEST_FILE = scsi_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/scsi/scsi.c/scsi_ut.c b/src/spdk/test/unit/lib/scsi/scsi.c/scsi_ut.c
new file mode 100644
index 00000000..5a1a31f6
--- /dev/null
+++ b/src/spdk/test/unit/lib/scsi/scsi.c/scsi_ut.c
@@ -0,0 +1,80 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk/scsi.h"
+
+#include "spdk_cunit.h"
+
+#include "scsi/scsi.c"
+
+static void
+scsi_init(void)
+{
+ int rc;
+
+ rc = spdk_scsi_init();
+ CU_ASSERT_EQUAL(rc, 0);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("scsi_suite", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "scsi init", \
+ scsi_init) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/scsi/scsi_bdev.c/.gitignore b/src/spdk/test/unit/lib/scsi/scsi_bdev.c/.gitignore
new file mode 100644
index 00000000..8f1ecc12
--- /dev/null
+++ b/src/spdk/test/unit/lib/scsi/scsi_bdev.c/.gitignore
@@ -0,0 +1 @@
+scsi_bdev_ut
diff --git a/src/spdk/test/unit/lib/scsi/scsi_bdev.c/Makefile b/src/spdk/test/unit/lib/scsi/scsi_bdev.c/Makefile
new file mode 100644
index 00000000..abb1de50
--- /dev/null
+++ b/src/spdk/test/unit/lib/scsi/scsi_bdev.c/Makefile
@@ -0,0 +1,40 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+
+TEST_FILE = scsi_bdev_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/scsi/scsi_bdev.c/scsi_bdev_ut.c b/src/spdk/test/unit/lib/scsi/scsi_bdev.c/scsi_bdev_ut.c
new file mode 100644
index 00000000..4deb2cec
--- /dev/null
+++ b/src/spdk/test/unit/lib/scsi/scsi_bdev.c/scsi_bdev_ut.c
@@ -0,0 +1,988 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "scsi/task.c"
+#include "scsi/scsi_bdev.c"
+
+#include "spdk_cunit.h"
+
+SPDK_LOG_REGISTER_COMPONENT("scsi", SPDK_LOG_SCSI)
+
+struct spdk_scsi_globals g_spdk_scsi;
+
+static uint64_t g_test_bdev_num_blocks;
+
+TAILQ_HEAD(, spdk_bdev_io) g_bdev_io_queue;
+int g_scsi_cb_called = 0;
+
+TAILQ_HEAD(, spdk_bdev_io_wait_entry) g_io_wait_queue;
+bool g_bdev_io_pool_full = false;
+
+void *
+spdk_dma_malloc(size_t size, size_t align, uint64_t *phys_addr)
+{
+ void *buf = malloc(size);
+ if (phys_addr) {
+ *phys_addr = (uint64_t)buf;
+ }
+
+ return buf;
+}
+
+void *
+spdk_dma_zmalloc(size_t size, size_t align, uint64_t *phys_addr)
+{
+ void *buf = calloc(size, 1);
+ if (phys_addr) {
+ *phys_addr = (uint64_t)buf;
+ }
+
+ return buf;
+}
+
+void
+spdk_dma_free(void *buf)
+{
+ free(buf);
+}
+
+bool
+spdk_bdev_io_type_supported(struct spdk_bdev *bdev, enum spdk_bdev_io_type io_type)
+{
+ abort();
+ return false;
+}
+
+void
+spdk_bdev_free_io(struct spdk_bdev_io *bdev_io)
+{
+ CU_ASSERT(0);
+}
+
+const char *
+spdk_bdev_get_name(const struct spdk_bdev *bdev)
+{
+ return "test";
+}
+
+uint32_t
+spdk_bdev_get_block_size(const struct spdk_bdev *bdev)
+{
+ return 512;
+}
+
+uint64_t
+spdk_bdev_get_num_blocks(const struct spdk_bdev *bdev)
+{
+ return g_test_bdev_num_blocks;
+}
+
+const char *
+spdk_bdev_get_product_name(const struct spdk_bdev *bdev)
+{
+ return "test product";
+}
+
+bool
+spdk_bdev_has_write_cache(const struct spdk_bdev *bdev)
+{
+ return false;
+}
+
+void
+spdk_scsi_lun_complete_task(struct spdk_scsi_lun *lun, struct spdk_scsi_task *task)
+{
+ g_scsi_cb_called++;
+}
+
+void
+spdk_scsi_lun_complete_mgmt_task(struct spdk_scsi_lun *lun, struct spdk_scsi_task *task)
+{
+}
+
+static void
+ut_put_task(struct spdk_scsi_task *task)
+{
+ if (task->alloc_len) {
+ free(task->iov.iov_base);
+ }
+
+ task->iov.iov_base = NULL;
+ task->iov.iov_len = 0;
+ task->alloc_len = 0;
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&g_bdev_io_queue));
+}
+
+
+static void
+ut_init_task(struct spdk_scsi_task *task)
+{
+ memset(task, 0xFF, sizeof(*task));
+ task->iov.iov_base = NULL;
+ task->iovs = &task->iov;
+ task->iovcnt = 1;
+ task->alloc_len = 0;
+ task->dxfer_dir = SPDK_SCSI_DIR_NONE;
+}
+
+void
+spdk_bdev_io_get_scsi_status(const struct spdk_bdev_io *bdev_io,
+ int *sc, int *sk, int *asc, int *ascq)
+{
+ switch (bdev_io->internal.status) {
+ case SPDK_BDEV_IO_STATUS_SUCCESS:
+ *sc = SPDK_SCSI_STATUS_GOOD;
+ *sk = SPDK_SCSI_SENSE_NO_SENSE;
+ *asc = SPDK_SCSI_ASC_NO_ADDITIONAL_SENSE;
+ *ascq = SPDK_SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ case SPDK_BDEV_IO_STATUS_SCSI_ERROR:
+ *sc = bdev_io->internal.error.scsi.sc;
+ *sk = bdev_io->internal.error.scsi.sk;
+ *asc = bdev_io->internal.error.scsi.asc;
+ *ascq = bdev_io->internal.error.scsi.ascq;
+ break;
+ default:
+ *sc = SPDK_SCSI_STATUS_CHECK_CONDITION;
+ *sk = SPDK_SCSI_SENSE_ABORTED_COMMAND;
+ *asc = SPDK_SCSI_ASC_NO_ADDITIONAL_SENSE;
+ *ascq = SPDK_SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
+ break;
+ }
+}
+
+void
+spdk_bdev_io_get_iovec(struct spdk_bdev_io *bdev_io, struct iovec **iovp, int *iovcntp)
+{
+ *iovp = NULL;
+ *iovcntp = 0;
+}
+
+static void
+ut_bdev_io_flush(void)
+{
+ struct spdk_bdev_io *bdev_io;
+ struct spdk_bdev_io_wait_entry *entry;
+
+ while (!TAILQ_EMPTY(&g_bdev_io_queue) || !TAILQ_EMPTY(&g_io_wait_queue)) {
+ while (!TAILQ_EMPTY(&g_bdev_io_queue)) {
+ bdev_io = TAILQ_FIRST(&g_bdev_io_queue);
+ TAILQ_REMOVE(&g_bdev_io_queue, bdev_io, internal.link);
+ bdev_io->internal.cb(bdev_io, true, bdev_io->internal.caller_ctx);
+ free(bdev_io);
+ }
+
+ while (!TAILQ_EMPTY(&g_io_wait_queue)) {
+ entry = TAILQ_FIRST(&g_io_wait_queue);
+ TAILQ_REMOVE(&g_io_wait_queue, entry, link);
+ entry->cb_fn(entry->cb_arg);
+ }
+ }
+}
+
+static int
+_spdk_bdev_io_op(spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ struct spdk_bdev_io *bdev_io;
+
+ if (g_bdev_io_pool_full) {
+ g_bdev_io_pool_full = false;
+ return -ENOMEM;
+ }
+
+ bdev_io = calloc(1, sizeof(*bdev_io));
+ SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
+ bdev_io->internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ bdev_io->internal.cb = cb;
+ bdev_io->internal.caller_ctx = cb_arg;
+
+ TAILQ_INSERT_TAIL(&g_bdev_io_queue, bdev_io, internal.link);
+
+ return 0;
+}
+
+int
+spdk_bdev_readv(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *iov, int iovcnt, uint64_t offset, uint64_t nbytes,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ return _spdk_bdev_io_op(cb, cb_arg);
+}
+
+int
+spdk_bdev_writev(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ struct iovec *iov, int iovcnt,
+ uint64_t offset, uint64_t len,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ return _spdk_bdev_io_op(cb, cb_arg);
+}
+
+int
+spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ return _spdk_bdev_io_op(cb, cb_arg);
+}
+
+int
+spdk_bdev_reset(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ return _spdk_bdev_io_op(cb, cb_arg);
+}
+
+int
+spdk_bdev_flush_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
+ uint64_t offset_blocks, uint64_t num_blocks,
+ spdk_bdev_io_completion_cb cb, void *cb_arg)
+{
+ return _spdk_bdev_io_op(cb, cb_arg);
+}
+
+int
+spdk_bdev_queue_io_wait(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
+ struct spdk_bdev_io_wait_entry *entry)
+{
+ TAILQ_INSERT_TAIL(&g_io_wait_queue, entry, link);
+ return 0;
+}
+
+/*
+ * This test specifically tests a mode select 6 command from the
+ * Windows SCSI compliance test that caused SPDK to crash.
+ */
+static void
+mode_select_6_test(void)
+{
+ struct spdk_bdev bdev;
+ struct spdk_scsi_task task;
+ struct spdk_scsi_lun lun;
+ struct spdk_scsi_dev dev;
+ char cdb[16];
+ char data[24];
+ int rc;
+
+ ut_init_task(&task);
+
+ cdb[0] = 0x15;
+ cdb[1] = 0x11;
+ cdb[2] = 0x00;
+ cdb[3] = 0x00;
+ cdb[4] = 0x18;
+ cdb[5] = 0x00;
+ task.cdb = cdb;
+
+ snprintf(&dev.name[0], sizeof(dev.name), "spdk_iscsi_translation_test");
+ lun.bdev = &bdev;
+ lun.dev = &dev;
+ task.lun = &lun;
+
+ memset(data, 0, sizeof(data));
+ data[4] = 0x08;
+ data[5] = 0x02;
+ spdk_scsi_task_set_data(&task, data, sizeof(data));
+
+ rc = spdk_bdev_scsi_execute(&task);
+
+ CU_ASSERT_EQUAL(rc, 0);
+
+ ut_put_task(&task);
+}
+
+/*
+ * This test specifically tests a mode select 6 command which
+ * contains no mode pages.
+ */
+static void
+mode_select_6_test2(void)
+{
+ struct spdk_bdev bdev;
+ struct spdk_scsi_task task;
+ struct spdk_scsi_lun lun;
+ struct spdk_scsi_dev dev;
+ char cdb[16];
+ int rc;
+
+ ut_init_task(&task);
+
+ cdb[0] = 0x15;
+ cdb[1] = 0x00;
+ cdb[2] = 0x00;
+ cdb[3] = 0x00;
+ cdb[4] = 0x00;
+ cdb[5] = 0x00;
+ task.cdb = cdb;
+
+ snprintf(&dev.name[0], sizeof(dev.name), "spdk_iscsi_translation_test");
+ lun.bdev = &bdev;
+ lun.dev = &dev;
+ task.lun = &lun;
+
+ rc = spdk_bdev_scsi_execute(&task);
+
+ CU_ASSERT_EQUAL(rc, 0);
+
+ ut_put_task(&task);
+}
+
+/*
+ * This test specifically tests a mode sense 6 command which
+ * return all subpage 00h mode pages.
+ */
+static void
+mode_sense_6_test(void)
+{
+ struct spdk_bdev bdev;
+ struct spdk_scsi_task task;
+ struct spdk_scsi_lun lun;
+ struct spdk_scsi_dev dev;
+ char cdb[12];
+ unsigned char *data;
+ int rc;
+ unsigned char mode_data_len = 0;
+ unsigned char medium_type = 0;
+ unsigned char dev_specific_param = 0;
+ unsigned char blk_descriptor_len = 0;
+
+ memset(&bdev, 0, sizeof(struct spdk_bdev));
+ ut_init_task(&task);
+ memset(cdb, 0, sizeof(cdb));
+
+ cdb[0] = 0x1A;
+ cdb[2] = 0x3F;
+ cdb[4] = 0xFF;
+ task.cdb = cdb;
+
+ snprintf(&dev.name[0], sizeof(dev.name), "spdk_iscsi_translation_test");
+ lun.bdev = &bdev;
+ lun.dev = &dev;
+ task.lun = &lun;
+
+ rc = spdk_bdev_scsi_execute(&task);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+
+ data = task.iovs[0].iov_base;
+ mode_data_len = data[0];
+ medium_type = data[1];
+ dev_specific_param = data[2];
+ blk_descriptor_len = data[3];
+
+ CU_ASSERT(mode_data_len >= 11);
+ CU_ASSERT_EQUAL(medium_type, 0);
+ CU_ASSERT_EQUAL(dev_specific_param, 0);
+ CU_ASSERT_EQUAL(blk_descriptor_len, 8);
+
+ ut_put_task(&task);
+}
+
+/*
+ * This test specifically tests a mode sense 10 command which
+ * return all subpage 00h mode pages.
+ */
+static void
+mode_sense_10_test(void)
+{
+ struct spdk_bdev bdev;
+ struct spdk_scsi_task task;
+ struct spdk_scsi_lun lun;
+ struct spdk_scsi_dev dev;
+ char cdb[12];
+ unsigned char *data;
+ int rc;
+ unsigned short mode_data_len = 0;
+ unsigned char medium_type = 0;
+ unsigned char dev_specific_param = 0;
+ unsigned short blk_descriptor_len = 0;
+
+ memset(&bdev, 0, sizeof(struct spdk_bdev));
+ ut_init_task(&task);
+ memset(cdb, 0, sizeof(cdb));
+ cdb[0] = 0x5A;
+ cdb[2] = 0x3F;
+ cdb[8] = 0xFF;
+ task.cdb = cdb;
+
+ snprintf(&dev.name[0], sizeof(dev.name), "spdk_iscsi_translation_test");
+ lun.bdev = &bdev;
+ lun.dev = &dev;
+ task.lun = &lun;
+
+ rc = spdk_bdev_scsi_execute(&task);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+
+ data = task.iovs[0].iov_base;
+ mode_data_len = ((data[0] << 8) + data[1]);
+ medium_type = data[2];
+ dev_specific_param = data[3];
+ blk_descriptor_len = ((data[6] << 8) + data[7]);
+
+ CU_ASSERT(mode_data_len >= 14);
+ CU_ASSERT_EQUAL(medium_type, 0);
+ CU_ASSERT_EQUAL(dev_specific_param, 0);
+ CU_ASSERT_EQUAL(blk_descriptor_len, 8);
+
+ ut_put_task(&task);
+}
+
+/*
+ * This test specifically tests a scsi inquiry command from the
+ * Windows SCSI compliance test that failed to return the
+ * expected SCSI error sense code.
+ */
+static void
+inquiry_evpd_test(void)
+{
+ struct spdk_bdev bdev;
+ struct spdk_scsi_task task;
+ struct spdk_scsi_lun lun;
+ struct spdk_scsi_dev dev;
+ char cdb[6];
+ int rc;
+
+ ut_init_task(&task);
+
+ cdb[0] = 0x12;
+ cdb[1] = 0x00; // EVPD = 0
+ cdb[2] = 0xff; // PageCode non-zero
+ cdb[3] = 0x00;
+ cdb[4] = 0xff;
+ cdb[5] = 0x00;
+ task.cdb = cdb;
+
+ snprintf(&dev.name[0], sizeof(dev.name), "spdk_iscsi_translation_test");
+ lun.bdev = &bdev;
+ lun.dev = &dev;
+ task.lun = &lun;
+
+ rc = spdk_bdev_scsi_execute(&task);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+
+ CU_ASSERT_EQUAL(task.status, SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT_EQUAL(task.sense_data[2] & 0xf, SPDK_SCSI_SENSE_ILLEGAL_REQUEST);
+ CU_ASSERT_EQUAL(task.sense_data[12], 0x24);
+ CU_ASSERT_EQUAL(task.sense_data[13], 0x0);
+
+ ut_put_task(&task);
+}
+
+/*
+ * This test is to verify specific return data for a standard scsi inquiry
+ * command: Version
+ */
+static void
+inquiry_standard_test(void)
+{
+ struct spdk_bdev bdev = { .blocklen = 512 };
+ struct spdk_scsi_task task;
+ struct spdk_scsi_lun lun;
+ struct spdk_scsi_dev dev;
+ char cdb[6];
+ char *data;
+ struct spdk_scsi_cdb_inquiry_data *inq_data;
+ int rc;
+
+ ut_init_task(&task);
+
+ cdb[0] = 0x12;
+ cdb[1] = 0x00; // EVPD = 0
+ cdb[2] = 0x00; // PageCode zero - requesting standard inquiry
+ cdb[3] = 0x00;
+ cdb[4] = 0xff; // Indicate data size used by conformance test
+ cdb[5] = 0x00;
+ task.cdb = cdb;
+
+ snprintf(&dev.name[0], sizeof(dev.name), "spdk_iscsi_translation_test");
+ lun.bdev = &bdev;
+ lun.dev = &dev;
+ task.lun = &lun;
+
+ rc = spdk_bdev_scsi_execute(&task);
+
+ data = task.iovs[0].iov_base;
+ inq_data = (struct spdk_scsi_cdb_inquiry_data *)&data[0];
+
+ CU_ASSERT_EQUAL(inq_data->version, SPDK_SPC_VERSION_SPC3);
+ CU_ASSERT_EQUAL(rc, 0);
+
+ ut_put_task(&task);
+}
+
+static void
+_inquiry_overflow_test(uint8_t alloc_len)
+{
+ struct spdk_bdev bdev = { .blocklen = 512 };
+ struct spdk_scsi_task task;
+ struct spdk_scsi_lun lun;
+ struct spdk_scsi_dev dev;
+ uint8_t cdb[6];
+ int rc;
+ /* expects a 4K internal data buffer */
+ char data[4096], data_compare[4096];
+
+ ut_init_task(&task);
+
+ cdb[0] = 0x12;
+ cdb[1] = 0x00; // EVPD = 0
+ cdb[2] = 0x00; // PageCode zero - requesting standard inquiry
+ cdb[3] = 0x00;
+ cdb[4] = alloc_len; // Indicate data size used by conformance test
+ cdb[5] = 0x00;
+ task.cdb = cdb;
+
+ snprintf(&dev.name[0], sizeof(dev.name), "spdk_iscsi_translation_test");
+ lun.bdev = &bdev;
+ lun.dev = &dev;
+ task.lun = &lun;
+
+ memset(data, 0, sizeof(data));
+ memset(data_compare, 0, sizeof(data_compare));
+
+ spdk_scsi_task_set_data(&task, data, sizeof(data));
+
+ rc = spdk_bdev_scsi_execute(&task);
+ SPDK_CU_ASSERT_FATAL(rc == 0);
+
+ CU_ASSERT_EQUAL(memcmp(data + alloc_len, data_compare + alloc_len, sizeof(data) - alloc_len), 0);
+ CU_ASSERT(task.data_transferred <= alloc_len);
+
+ ut_put_task(&task);
+}
+
+static void
+inquiry_overflow_test(void)
+{
+ int i;
+
+ for (i = 0; i < 256; i++) {
+ _inquiry_overflow_test(i);
+ }
+}
+
+static void
+scsi_name_padding_test(void)
+{
+ char name[SPDK_SCSI_DEV_MAX_NAME + 1];
+ char buf[SPDK_SCSI_DEV_MAX_NAME + 1];
+ int written, i;
+
+ /* case 1 */
+ memset(name, '\0', sizeof(name));
+ memset(name, 'x', 251);
+ written = spdk_bdev_scsi_pad_scsi_name(buf, name);
+
+ CU_ASSERT(written == 252);
+ CU_ASSERT(buf[250] == 'x');
+ CU_ASSERT(buf[251] == '\0');
+
+ /* case 2: */
+ memset(name, '\0', sizeof(name));
+ memset(name, 'x', 252);
+ written = spdk_bdev_scsi_pad_scsi_name(buf, name);
+
+ CU_ASSERT(written == 256);
+ CU_ASSERT(buf[251] == 'x');
+ for (i = 252; i < 256; i++) {
+ CU_ASSERT(buf[i] == '\0');
+ }
+
+ /* case 3 */
+ memset(name, '\0', sizeof(name));
+ memset(name, 'x', 255);
+ written = spdk_bdev_scsi_pad_scsi_name(buf, name);
+
+ CU_ASSERT(written == 256);
+ CU_ASSERT(buf[254] == 'x');
+ CU_ASSERT(buf[255] == '\0');
+}
+
+/*
+ * This test is to verify specific error translation from bdev to scsi.
+ */
+static void
+task_complete_test(void)
+{
+ struct spdk_scsi_task task;
+ struct spdk_bdev_io bdev_io = {};
+ struct spdk_scsi_lun lun;
+
+ ut_init_task(&task);
+
+ TAILQ_INIT(&lun.tasks);
+ TAILQ_INSERT_TAIL(&lun.tasks, &task, scsi_link);
+ task.lun = &lun;
+
+ bdev_io.internal.status = SPDK_BDEV_IO_STATUS_SUCCESS;
+ spdk_bdev_scsi_task_complete_cmd(&bdev_io, bdev_io.internal.status, &task);
+ CU_ASSERT_EQUAL(task.status, SPDK_SCSI_STATUS_GOOD);
+ CU_ASSERT(g_scsi_cb_called == 1);
+ g_scsi_cb_called = 0;
+
+ bdev_io.internal.status = SPDK_BDEV_IO_STATUS_SCSI_ERROR;
+ bdev_io.internal.error.scsi.sc = SPDK_SCSI_STATUS_CHECK_CONDITION;
+ bdev_io.internal.error.scsi.sk = SPDK_SCSI_SENSE_HARDWARE_ERROR;
+ bdev_io.internal.error.scsi.asc = SPDK_SCSI_ASC_WARNING;
+ bdev_io.internal.error.scsi.ascq = SPDK_SCSI_ASCQ_POWER_LOSS_EXPECTED;
+ spdk_bdev_scsi_task_complete_cmd(&bdev_io, bdev_io.internal.status, &task);
+ CU_ASSERT_EQUAL(task.status, SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT_EQUAL(task.sense_data[2] & 0xf, SPDK_SCSI_SENSE_HARDWARE_ERROR);
+ CU_ASSERT_EQUAL(task.sense_data[12], SPDK_SCSI_ASC_WARNING);
+ CU_ASSERT_EQUAL(task.sense_data[13], SPDK_SCSI_ASCQ_POWER_LOSS_EXPECTED);
+ CU_ASSERT(g_scsi_cb_called == 1);
+ g_scsi_cb_called = 0;
+
+ bdev_io.internal.status = SPDK_BDEV_IO_STATUS_FAILED;
+ spdk_bdev_scsi_task_complete_cmd(&bdev_io, bdev_io.internal.status, &task);
+ CU_ASSERT_EQUAL(task.status, SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT_EQUAL(task.sense_data[2] & 0xf, SPDK_SCSI_SENSE_ABORTED_COMMAND);
+ CU_ASSERT_EQUAL(task.sense_data[12], SPDK_SCSI_ASC_NO_ADDITIONAL_SENSE);
+ CU_ASSERT_EQUAL(task.sense_data[13], SPDK_SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+ CU_ASSERT(g_scsi_cb_called == 1);
+ g_scsi_cb_called = 0;
+
+ ut_put_task(&task);
+}
+
+static void
+lba_range_test(void)
+{
+ struct spdk_bdev bdev;
+ struct spdk_scsi_lun lun;
+ struct spdk_scsi_task task;
+ uint8_t cdb[16];
+ int rc;
+
+ lun.bdev = &bdev;
+
+ ut_init_task(&task);
+ task.lun = &lun;
+ task.lun->bdev_desc = NULL;
+ task.lun->io_channel = NULL;
+ task.cdb = cdb;
+
+ memset(cdb, 0, sizeof(cdb));
+ cdb[0] = 0x88; /* READ (16) */
+
+ /* Test block device size of 4 blocks */
+ g_test_bdev_num_blocks = 4;
+
+ /* LBA = 0, length = 1 (in range) */
+ to_be64(&cdb[2], 0); /* LBA */
+ to_be32(&cdb[10], 1); /* transfer length */
+ task.transfer_len = 1 * 512;
+ rc = spdk_bdev_scsi_execute(&task);
+ CU_ASSERT(rc == SPDK_SCSI_TASK_PENDING);
+ CU_ASSERT(task.status == 0xFF);
+ SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&g_bdev_io_queue));
+ ut_bdev_io_flush();
+ CU_ASSERT(task.status == SPDK_SCSI_STATUS_GOOD);
+ CU_ASSERT(g_scsi_cb_called == 1);
+ g_scsi_cb_called = 0;
+
+ /* LBA = 4, length = 1 (LBA out of range) */
+ to_be64(&cdb[2], 4); /* LBA */
+ to_be32(&cdb[10], 1); /* transfer length */
+ task.transfer_len = 1 * 512;
+ rc = spdk_bdev_scsi_execute(&task);
+ CU_ASSERT(rc == SPDK_SCSI_TASK_COMPLETE);
+ CU_ASSERT(task.status == SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT(task.sense_data[12] == SPDK_SCSI_ASC_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE);
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&g_bdev_io_queue));
+
+ /* LBA = 0, length = 4 (in range, max valid size) */
+ to_be64(&cdb[2], 0); /* LBA */
+ to_be32(&cdb[10], 4); /* transfer length */
+ task.transfer_len = 4 * 512;
+ task.status = 0xFF;
+ rc = spdk_bdev_scsi_execute(&task);
+ CU_ASSERT(rc == SPDK_SCSI_TASK_PENDING);
+ CU_ASSERT(task.status == 0xFF);
+ SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&g_bdev_io_queue));
+ ut_bdev_io_flush();
+ CU_ASSERT(task.status == SPDK_SCSI_STATUS_GOOD);
+ CU_ASSERT(g_scsi_cb_called == 1);
+ g_scsi_cb_called = 0;
+
+ /* LBA = 0, length = 5 (LBA in range, length beyond end of bdev) */
+ to_be64(&cdb[2], 0); /* LBA */
+ to_be32(&cdb[10], 5); /* transfer length */
+ task.transfer_len = 5 * 512;
+ rc = spdk_bdev_scsi_execute(&task);
+ CU_ASSERT(rc == SPDK_SCSI_TASK_COMPLETE);
+ CU_ASSERT(task.status == SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT(task.sense_data[12] == SPDK_SCSI_ASC_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE);
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&g_bdev_io_queue));
+
+ ut_put_task(&task);
+}
+
+static void
+xfer_len_test(void)
+{
+ struct spdk_bdev bdev;
+ struct spdk_scsi_lun lun;
+ struct spdk_scsi_task task;
+ uint8_t cdb[16];
+ int rc;
+
+ lun.bdev = &bdev;
+
+ ut_init_task(&task);
+ task.lun = &lun;
+ task.lun->bdev_desc = NULL;
+ task.lun->io_channel = NULL;
+ task.cdb = cdb;
+
+ memset(cdb, 0, sizeof(cdb));
+ cdb[0] = 0x88; /* READ (16) */
+
+ /* Test block device size of 512 MiB */
+ g_test_bdev_num_blocks = 512 * 1024 * 1024;
+
+ /* 1 block */
+ to_be64(&cdb[2], 0); /* LBA */
+ to_be32(&cdb[10], 1); /* transfer length */
+ task.transfer_len = 1 * 512;
+ rc = spdk_bdev_scsi_execute(&task);
+ CU_ASSERT(rc == SPDK_SCSI_TASK_PENDING);
+ CU_ASSERT(task.status == 0xFF);
+ SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&g_bdev_io_queue));
+ ut_bdev_io_flush();
+ CU_ASSERT(task.status == SPDK_SCSI_STATUS_GOOD);
+ CU_ASSERT(g_scsi_cb_called == 1);
+ g_scsi_cb_called = 0;
+
+ /* max transfer length (as reported in block limits VPD page) */
+ to_be64(&cdb[2], 0); /* LBA */
+ to_be32(&cdb[10], SPDK_WORK_BLOCK_SIZE / 512); /* transfer length */
+ task.transfer_len = SPDK_WORK_BLOCK_SIZE;
+ task.status = 0xFF;
+ rc = spdk_bdev_scsi_execute(&task);
+ CU_ASSERT(rc == SPDK_SCSI_TASK_PENDING);
+ CU_ASSERT(task.status == 0xFF);
+ SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&g_bdev_io_queue));
+ ut_bdev_io_flush();
+ CU_ASSERT(task.status == SPDK_SCSI_STATUS_GOOD);
+ CU_ASSERT(g_scsi_cb_called == 1);
+ g_scsi_cb_called = 0;
+
+ /* max transfer length plus one block (invalid) */
+ to_be64(&cdb[2], 0); /* LBA */
+ to_be32(&cdb[10], SPDK_WORK_BLOCK_SIZE / 512 + 1); /* transfer length */
+ task.transfer_len = SPDK_WORK_BLOCK_SIZE + 512;
+ rc = spdk_bdev_scsi_execute(&task);
+ CU_ASSERT(rc == SPDK_SCSI_TASK_COMPLETE);
+ CU_ASSERT(task.status == SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT((task.sense_data[2] & 0xf) == SPDK_SCSI_SENSE_ILLEGAL_REQUEST);
+ CU_ASSERT(task.sense_data[12] == SPDK_SCSI_ASC_INVALID_FIELD_IN_CDB);
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&g_bdev_io_queue));
+
+ /* zero transfer length (valid) */
+ to_be64(&cdb[2], 0); /* LBA */
+ to_be32(&cdb[10], 0); /* transfer length */
+ task.transfer_len = 0;
+ rc = spdk_bdev_scsi_execute(&task);
+ CU_ASSERT(rc == SPDK_SCSI_TASK_COMPLETE);
+ CU_ASSERT(task.status == SPDK_SCSI_STATUS_GOOD);
+ CU_ASSERT(task.data_transferred == 0);
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&g_bdev_io_queue));
+
+ /* zero transfer length past end of disk (invalid) */
+ to_be64(&cdb[2], g_test_bdev_num_blocks); /* LBA */
+ to_be32(&cdb[10], 0); /* transfer length */
+ task.transfer_len = 0;
+ rc = spdk_bdev_scsi_execute(&task);
+ CU_ASSERT(rc == SPDK_SCSI_TASK_COMPLETE);
+ CU_ASSERT(task.status == SPDK_SCSI_STATUS_CHECK_CONDITION);
+ CU_ASSERT(task.sense_data[12] == SPDK_SCSI_ASC_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE);
+ SPDK_CU_ASSERT_FATAL(TAILQ_EMPTY(&g_bdev_io_queue));
+
+ ut_put_task(&task);
+}
+
+static void
+_xfer_test(bool bdev_io_pool_full)
+{
+ struct spdk_bdev bdev;
+ struct spdk_scsi_lun lun;
+ struct spdk_scsi_task task;
+ uint8_t cdb[16];
+ char data[4096];
+ int rc;
+
+ lun.bdev = &bdev;
+
+ /* Test block device size of 512 MiB */
+ g_test_bdev_num_blocks = 512 * 1024 * 1024;
+
+ /* Read 1 block */
+ ut_init_task(&task);
+ task.lun = &lun;
+ task.lun->bdev_desc = NULL;
+ task.lun->io_channel = NULL;
+ task.cdb = cdb;
+ memset(cdb, 0, sizeof(cdb));
+ cdb[0] = 0x88; /* READ (16) */
+ to_be64(&cdb[2], 0); /* LBA */
+ to_be32(&cdb[10], 1); /* transfer length */
+ task.transfer_len = 1 * 512;
+ g_bdev_io_pool_full = bdev_io_pool_full;
+ rc = spdk_bdev_scsi_execute(&task);
+ CU_ASSERT(rc == SPDK_SCSI_TASK_PENDING);
+ CU_ASSERT(task.status == 0xFF);
+
+ ut_bdev_io_flush();
+ CU_ASSERT(task.status == SPDK_SCSI_STATUS_GOOD);
+ CU_ASSERT(g_scsi_cb_called == 1);
+ g_scsi_cb_called = 0;
+ ut_put_task(&task);
+
+ /* Write 1 block */
+ ut_init_task(&task);
+ task.lun = &lun;
+ task.cdb = cdb;
+ memset(cdb, 0, sizeof(cdb));
+ cdb[0] = 0x8a; /* WRITE (16) */
+ to_be64(&cdb[2], 0); /* LBA */
+ to_be32(&cdb[10], 1); /* transfer length */
+ task.transfer_len = 1 * 512;
+ g_bdev_io_pool_full = bdev_io_pool_full;
+ rc = spdk_bdev_scsi_execute(&task);
+ CU_ASSERT(rc == SPDK_SCSI_TASK_PENDING);
+ CU_ASSERT(task.status == 0xFF);
+
+ ut_bdev_io_flush();
+ CU_ASSERT(task.status == SPDK_SCSI_STATUS_GOOD);
+ CU_ASSERT(g_scsi_cb_called == 1);
+ g_scsi_cb_called = 0;
+ ut_put_task(&task);
+
+ /* Unmap 5 blocks using 2 descriptors */
+ ut_init_task(&task);
+ task.lun = &lun;
+ task.cdb = cdb;
+ memset(cdb, 0, sizeof(cdb));
+ cdb[0] = 0x42; /* UNMAP */
+ to_be16(&data[7], 2); /* 2 parameters in list */
+ memset(data, 0, sizeof(data));
+ to_be16(&data[2], 32); /* 2 descriptors */
+ to_be64(&data[8], 1); /* LBA 1 */
+ to_be32(&data[16], 2); /* 2 blocks */
+ to_be64(&data[24], 10); /* LBA 10 */
+ to_be32(&data[32], 3); /* 3 blocks */
+ spdk_scsi_task_set_data(&task, data, sizeof(data));
+ task.status = SPDK_SCSI_STATUS_GOOD;
+ g_bdev_io_pool_full = bdev_io_pool_full;
+ rc = spdk_bdev_scsi_execute(&task);
+ CU_ASSERT(rc == SPDK_SCSI_TASK_PENDING);
+ CU_ASSERT(task.status == SPDK_SCSI_STATUS_GOOD);
+
+ ut_bdev_io_flush();
+ CU_ASSERT(task.status == SPDK_SCSI_STATUS_GOOD);
+ CU_ASSERT(g_scsi_cb_called == 1);
+ g_scsi_cb_called = 0;
+ ut_put_task(&task);
+
+ /* Flush 1 block */
+ ut_init_task(&task);
+ task.lun = &lun;
+ task.cdb = cdb;
+ memset(cdb, 0, sizeof(cdb));
+ cdb[0] = 0x91; /* SYNCHRONIZE CACHE (16) */
+ to_be64(&cdb[2], 0); /* LBA */
+ to_be32(&cdb[10], 1); /* 1 blocks */
+ g_bdev_io_pool_full = bdev_io_pool_full;
+ rc = spdk_bdev_scsi_execute(&task);
+ CU_ASSERT(rc == SPDK_SCSI_TASK_PENDING);
+ CU_ASSERT(task.status == 0xFF);
+
+ ut_bdev_io_flush();
+ CU_ASSERT(task.status == SPDK_SCSI_STATUS_GOOD);
+ CU_ASSERT(g_scsi_cb_called == 1);
+ g_scsi_cb_called = 0;
+ ut_put_task(&task);
+}
+
+static void
+xfer_test(void)
+{
+ _xfer_test(false);
+ _xfer_test(true);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ TAILQ_INIT(&g_bdev_io_queue);
+ TAILQ_INIT(&g_io_wait_queue);
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("translation_suite", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "mode select 6 test", mode_select_6_test) == NULL
+ || CU_add_test(suite, "mode select 6 test2", mode_select_6_test2) == NULL
+ || CU_add_test(suite, "mode sense 6 test", mode_sense_6_test) == NULL
+ || CU_add_test(suite, "mode sense 10 test", mode_sense_10_test) == NULL
+ || CU_add_test(suite, "inquiry evpd test", inquiry_evpd_test) == NULL
+ || CU_add_test(suite, "inquiry standard test", inquiry_standard_test) == NULL
+ || CU_add_test(suite, "inquiry overflow test", inquiry_overflow_test) == NULL
+ || CU_add_test(suite, "task complete test", task_complete_test) == NULL
+ || CU_add_test(suite, "LBA range test", lba_range_test) == NULL
+ || CU_add_test(suite, "transfer length test", xfer_len_test) == NULL
+ || CU_add_test(suite, "transfer test", xfer_test) == NULL
+ || CU_add_test(suite, "scsi name padding test", scsi_name_padding_test) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/sock/Makefile b/src/spdk/test/unit/lib/sock/Makefile
new file mode 100644
index 00000000..5e16429d
--- /dev/null
+++ b/src/spdk/test/unit/lib/sock/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = sock.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/sock/sock.c/.gitignore b/src/spdk/test/unit/lib/sock/sock.c/.gitignore
new file mode 100644
index 00000000..bd9bf833
--- /dev/null
+++ b/src/spdk/test/unit/lib/sock/sock.c/.gitignore
@@ -0,0 +1 @@
+sock_ut
diff --git a/src/spdk/test/unit/lib/sock/sock.c/Makefile b/src/spdk/test/unit/lib/sock/sock.c/Makefile
new file mode 100644
index 00000000..845c9ade
--- /dev/null
+++ b/src/spdk/test/unit/lib/sock/sock.c/Makefile
@@ -0,0 +1,40 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+
+TEST_FILE = sock_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/sock/sock.c/sock_ut.c b/src/spdk/test/unit/lib/sock/sock.c/sock_ut.c
new file mode 100644
index 00000000..a0176f11
--- /dev/null
+++ b/src/spdk/test/unit/lib/sock/sock.c/sock_ut.c
@@ -0,0 +1,643 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+#include "spdk/util.h"
+
+#include "spdk_cunit.h"
+
+#include "sock/sock.c"
+#include "sock/posix/posix.c"
+
+#define UT_IP "test_ip"
+#define UT_PORT 1234
+
+bool g_read_data_called;
+ssize_t g_bytes_read;
+char g_buf[256];
+struct spdk_sock *g_server_sock_read;
+int g_ut_accept_count;
+struct spdk_ut_sock *g_ut_listen_sock;
+struct spdk_ut_sock *g_ut_client_sock;
+
+struct spdk_ut_sock {
+ struct spdk_sock base;
+ struct spdk_ut_sock *peer;
+ size_t bytes_avail;
+ char buf[256];
+};
+
+struct spdk_ut_sock_group_impl {
+ struct spdk_sock_group_impl base;
+ struct spdk_ut_sock *sock;
+};
+
+#define __ut_sock(sock) (struct spdk_ut_sock *)sock
+#define __ut_group(group) (struct spdk_ut_sock_group_impl *)group
+
+static int
+spdk_ut_sock_getaddr(struct spdk_sock *_sock, char *saddr, int slen, uint16_t *sport,
+ char *caddr, int clen, uint16_t *cport)
+{
+ return 0;
+}
+
+static struct spdk_sock *
+spdk_ut_sock_listen(const char *ip, int port)
+{
+ struct spdk_ut_sock *sock;
+
+ if (strcmp(ip, UT_IP) || port != UT_PORT) {
+ return NULL;
+ }
+
+ CU_ASSERT(g_ut_listen_sock == NULL);
+
+ sock = calloc(1, sizeof(*sock));
+ SPDK_CU_ASSERT_FATAL(sock != NULL);
+ g_ut_listen_sock = sock;
+
+ return &sock->base;
+}
+
+static struct spdk_sock *
+spdk_ut_sock_connect(const char *ip, int port)
+{
+ struct spdk_ut_sock *sock;
+
+ if (strcmp(ip, UT_IP) || port != UT_PORT) {
+ return NULL;
+ }
+
+ sock = calloc(1, sizeof(*sock));
+ SPDK_CU_ASSERT_FATAL(sock != NULL);
+ g_ut_accept_count++;
+ CU_ASSERT(g_ut_client_sock == NULL);
+ g_ut_client_sock = sock;
+
+ return &sock->base;
+}
+
+static struct spdk_sock *
+spdk_ut_sock_accept(struct spdk_sock *_sock)
+{
+ struct spdk_ut_sock *sock = __ut_sock(_sock);
+ struct spdk_ut_sock *new_sock;
+
+ CU_ASSERT(sock == g_ut_listen_sock);
+
+ if (g_ut_accept_count == 0) {
+ errno = EAGAIN;
+ return NULL;
+ }
+
+ g_ut_accept_count--;
+ new_sock = calloc(1, sizeof(*sock));
+ if (new_sock == NULL) {
+ SPDK_ERRLOG("sock allocation failed\n");
+ return NULL;
+ }
+
+ SPDK_CU_ASSERT_FATAL(g_ut_client_sock != NULL);
+ g_ut_client_sock->peer = new_sock;
+
+ return &new_sock->base;
+}
+
+static int
+spdk_ut_sock_close(struct spdk_sock *_sock)
+{
+ struct spdk_ut_sock *sock = __ut_sock(_sock);
+
+ if (sock == g_ut_listen_sock) {
+ g_ut_listen_sock = NULL;
+ }
+ if (sock == g_ut_client_sock) {
+ g_ut_client_sock = NULL;
+ }
+ free(_sock);
+
+ return 0;
+}
+
+static ssize_t
+spdk_ut_sock_recv(struct spdk_sock *_sock, void *buf, size_t len)
+{
+ struct spdk_ut_sock *sock = __ut_sock(_sock);
+ char tmp[256];
+
+ len = spdk_min(len, sock->bytes_avail);
+
+ if (len == 0) {
+ errno = EAGAIN;
+ return -1;
+ }
+
+ memcpy(buf, sock->buf, len);
+ memcpy(tmp, &sock->buf[len], sock->bytes_avail - len);
+ memcpy(sock->buf, tmp, sock->bytes_avail - len);
+ sock->bytes_avail -= len;
+
+ return len;
+}
+
+static ssize_t
+spdk_ut_sock_writev(struct spdk_sock *_sock, struct iovec *iov, int iovcnt)
+{
+ struct spdk_ut_sock *sock = __ut_sock(_sock);
+ struct spdk_ut_sock *peer;
+
+ SPDK_CU_ASSERT_FATAL(sock->peer != NULL);
+ peer = sock->peer;
+
+ /* Test implementation only supports single iov for now. */
+ CU_ASSERT(iovcnt == 1);
+
+ memcpy(&peer->buf[peer->bytes_avail], iov[0].iov_base, iov[0].iov_len);
+ peer->bytes_avail += iov[0].iov_len;
+
+ return iov[0].iov_len;
+}
+
+static int
+spdk_ut_sock_set_recvlowat(struct spdk_sock *_sock, int nbytes)
+{
+ return 0;
+}
+
+static int
+spdk_ut_sock_set_recvbuf(struct spdk_sock *_sock, int sz)
+{
+ return 0;
+}
+
+static int
+spdk_ut_sock_set_sendbuf(struct spdk_sock *_sock, int sz)
+{
+ return 0;
+}
+
+static bool
+spdk_ut_sock_is_ipv6(struct spdk_sock *_sock)
+{
+ return false;
+}
+
+static bool
+spdk_ut_sock_is_ipv4(struct spdk_sock *_sock)
+{
+ return true;
+}
+
+static struct spdk_sock_group_impl *
+spdk_ut_sock_group_impl_create(void)
+{
+ struct spdk_ut_sock_group_impl *group_impl;
+
+ group_impl = calloc(1, sizeof(*group_impl));
+ SPDK_CU_ASSERT_FATAL(group_impl != NULL);
+
+ return &group_impl->base;
+}
+
+static int
+spdk_ut_sock_group_impl_add_sock(struct spdk_sock_group_impl *_group, struct spdk_sock *_sock)
+{
+ struct spdk_ut_sock_group_impl *group = __ut_group(_group);
+ struct spdk_ut_sock *sock = __ut_sock(_sock);
+
+ group->sock = sock;
+
+ return 0;
+}
+
+static int
+spdk_ut_sock_group_impl_remove_sock(struct spdk_sock_group_impl *_group, struct spdk_sock *_sock)
+{
+ struct spdk_ut_sock_group_impl *group = __ut_group(_group);
+ struct spdk_ut_sock *sock = __ut_sock(_sock);
+
+ CU_ASSERT(group->sock == sock);
+ group->sock = NULL;
+
+ return 0;
+}
+
+static int
+spdk_ut_sock_group_impl_poll(struct spdk_sock_group_impl *_group, int max_events,
+ struct spdk_sock **socks)
+{
+ struct spdk_ut_sock_group_impl *group = __ut_group(_group);
+
+ if (group->sock != NULL && group->sock->bytes_avail > 0) {
+ socks[0] = &group->sock->base;
+ return 1;
+ }
+
+ return 0;
+}
+
+static int
+spdk_ut_sock_group_impl_close(struct spdk_sock_group_impl *_group)
+{
+ struct spdk_ut_sock_group_impl *group = __ut_group(_group);
+
+ CU_ASSERT(group->sock == NULL);
+
+ return 0;
+}
+
+static struct spdk_net_impl g_ut_net_impl = {
+ .name = "ut",
+ .getaddr = spdk_ut_sock_getaddr,
+ .connect = spdk_ut_sock_connect,
+ .listen = spdk_ut_sock_listen,
+ .accept = spdk_ut_sock_accept,
+ .close = spdk_ut_sock_close,
+ .recv = spdk_ut_sock_recv,
+ .writev = spdk_ut_sock_writev,
+ .set_recvlowat = spdk_ut_sock_set_recvlowat,
+ .set_recvbuf = spdk_ut_sock_set_recvbuf,
+ .set_sendbuf = spdk_ut_sock_set_sendbuf,
+ .is_ipv6 = spdk_ut_sock_is_ipv6,
+ .is_ipv4 = spdk_ut_sock_is_ipv4,
+ .group_impl_create = spdk_ut_sock_group_impl_create,
+ .group_impl_add_sock = spdk_ut_sock_group_impl_add_sock,
+ .group_impl_remove_sock = spdk_ut_sock_group_impl_remove_sock,
+ .group_impl_poll = spdk_ut_sock_group_impl_poll,
+ .group_impl_close = spdk_ut_sock_group_impl_close,
+};
+
+SPDK_NET_IMPL_REGISTER(ut, &g_ut_net_impl);
+
+static void
+_sock(const char *ip, int port)
+{
+ struct spdk_sock *listen_sock;
+ struct spdk_sock *server_sock;
+ struct spdk_sock *client_sock;
+ char *test_string = "abcdef";
+ char buffer[64];
+ ssize_t bytes_read, bytes_written;
+ struct iovec iov;
+ int rc;
+
+ listen_sock = spdk_sock_listen(ip, port);
+ SPDK_CU_ASSERT_FATAL(listen_sock != NULL);
+
+ server_sock = spdk_sock_accept(listen_sock);
+ CU_ASSERT(server_sock == NULL);
+ CU_ASSERT(errno == EAGAIN || errno == EWOULDBLOCK);
+
+ client_sock = spdk_sock_connect(ip, port);
+ SPDK_CU_ASSERT_FATAL(client_sock != NULL);
+
+ /*
+ * Delay a bit here before checking if server socket is
+ * ready.
+ */
+ usleep(1000);
+
+ server_sock = spdk_sock_accept(listen_sock);
+ SPDK_CU_ASSERT_FATAL(server_sock != NULL);
+
+ iov.iov_base = test_string;
+ iov.iov_len = 7;
+ bytes_written = spdk_sock_writev(client_sock, &iov, 1);
+ CU_ASSERT(bytes_written == 7);
+
+ usleep(1000);
+
+ bytes_read = spdk_sock_recv(server_sock, buffer, 2);
+ CU_ASSERT(bytes_read == 2);
+
+ usleep(1000);
+
+ bytes_read += spdk_sock_recv(server_sock, buffer + 2, 5);
+ CU_ASSERT(bytes_read == 7);
+
+ CU_ASSERT(strncmp(test_string, buffer, 7) == 0);
+
+ rc = spdk_sock_close(&client_sock);
+ CU_ASSERT(client_sock == NULL);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_sock_close(&server_sock);
+ CU_ASSERT(server_sock == NULL);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_sock_close(&listen_sock);
+ CU_ASSERT(listen_sock == NULL);
+ CU_ASSERT(rc == 0);
+}
+
+static void
+posix_sock(void)
+{
+ _sock("127.0.0.1", 3260);
+}
+
+static void
+ut_sock(void)
+{
+ _sock(UT_IP, UT_PORT);
+}
+
+static void
+read_data(void *cb_arg, struct spdk_sock_group *group, struct spdk_sock *sock)
+{
+ struct spdk_sock *server_sock = cb_arg;
+
+ CU_ASSERT(server_sock == sock);
+
+ g_read_data_called = true;
+ g_bytes_read += spdk_sock_recv(server_sock, g_buf + g_bytes_read, sizeof(g_buf) - g_bytes_read);
+}
+
+static void
+_sock_group(const char *ip, int port)
+{
+ struct spdk_sock_group *group;
+ struct spdk_sock *listen_sock;
+ struct spdk_sock *server_sock;
+ struct spdk_sock *client_sock;
+ char *test_string = "abcdef";
+ ssize_t bytes_written;
+ struct iovec iov;
+ int rc;
+
+ listen_sock = spdk_sock_listen(ip, port);
+ SPDK_CU_ASSERT_FATAL(listen_sock != NULL);
+
+ server_sock = spdk_sock_accept(listen_sock);
+ CU_ASSERT(server_sock == NULL);
+ CU_ASSERT(errno == EAGAIN || errno == EWOULDBLOCK);
+
+ client_sock = spdk_sock_connect(ip, port);
+ SPDK_CU_ASSERT_FATAL(client_sock != NULL);
+
+ usleep(1000);
+
+ server_sock = spdk_sock_accept(listen_sock);
+ SPDK_CU_ASSERT_FATAL(server_sock != NULL);
+
+ group = spdk_sock_group_create();
+ SPDK_CU_ASSERT_FATAL(group != NULL);
+
+ /* pass null cb_fn */
+ rc = spdk_sock_group_add_sock(group, server_sock, NULL, NULL);
+ CU_ASSERT(rc == -1);
+ CU_ASSERT(errno == EINVAL);
+
+ rc = spdk_sock_group_add_sock(group, server_sock, read_data, server_sock);
+ CU_ASSERT(rc == 0);
+
+ /* try adding sock a second time */
+ rc = spdk_sock_group_add_sock(group, server_sock, read_data, server_sock);
+ CU_ASSERT(rc == -1);
+ CU_ASSERT(errno == EBUSY);
+
+ g_read_data_called = false;
+ g_bytes_read = 0;
+ rc = spdk_sock_group_poll(group);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_read_data_called == false);
+
+ iov.iov_base = test_string;
+ iov.iov_len = 7;
+ bytes_written = spdk_sock_writev(client_sock, &iov, 1);
+ CU_ASSERT(bytes_written == 7);
+
+ usleep(1000);
+
+ g_read_data_called = false;
+ g_bytes_read = 0;
+ rc = spdk_sock_group_poll(group);
+
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_read_data_called == true);
+ CU_ASSERT(g_bytes_read == 7);
+
+ CU_ASSERT(strncmp(test_string, g_buf, 7) == 0);
+
+ rc = spdk_sock_close(&client_sock);
+ CU_ASSERT(client_sock == NULL);
+ CU_ASSERT(rc == 0);
+
+ /* Try to close sock_group while it still has sockets. */
+ rc = spdk_sock_group_close(&group);
+ CU_ASSERT(rc == -1);
+ CU_ASSERT(errno == EBUSY);
+
+ /* Try to close sock while it is still part of a sock_group. */
+ rc = spdk_sock_close(&server_sock);
+ CU_ASSERT(rc == -1);
+ CU_ASSERT(errno == EBUSY);
+
+ rc = spdk_sock_group_remove_sock(group, server_sock);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_sock_group_close(&group);
+ CU_ASSERT(group == NULL);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_sock_close(&server_sock);
+ CU_ASSERT(server_sock == NULL);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_sock_close(&listen_sock);
+ CU_ASSERT(listen_sock == NULL);
+ CU_ASSERT(rc == 0);
+}
+
+static void
+posix_sock_group(void)
+{
+ _sock_group("127.0.0.1", 3260);
+}
+
+static void
+ut_sock_group(void)
+{
+ _sock_group(UT_IP, UT_PORT);
+}
+
+static void
+read_data_fairness(void *cb_arg, struct spdk_sock_group *group, struct spdk_sock *sock)
+{
+ struct spdk_sock *server_sock = cb_arg;
+ ssize_t bytes_read;
+ char buf[1];
+
+ CU_ASSERT(g_server_sock_read == NULL);
+ CU_ASSERT(server_sock == sock);
+
+ g_server_sock_read = server_sock;
+ bytes_read = spdk_sock_recv(server_sock, buf, 1);
+ CU_ASSERT(bytes_read == 1);
+}
+
+static void
+posix_sock_group_fairness(void)
+{
+ struct spdk_sock_group *group;
+ struct spdk_sock *listen_sock;
+ struct spdk_sock *server_sock[3];
+ struct spdk_sock *client_sock[3];
+ char test_char = 'a';
+ ssize_t bytes_written;
+ struct iovec iov;
+ int i, rc;
+
+ listen_sock = spdk_sock_listen("127.0.0.1", 3260);
+ SPDK_CU_ASSERT_FATAL(listen_sock != NULL);
+
+ group = spdk_sock_group_create();
+ SPDK_CU_ASSERT_FATAL(group != NULL);
+
+ for (i = 0; i < 3; i++) {
+ client_sock[i] = spdk_sock_connect("127.0.0.1", 3260);
+ SPDK_CU_ASSERT_FATAL(client_sock[i] != NULL);
+
+ usleep(1000);
+
+ server_sock[i] = spdk_sock_accept(listen_sock);
+ SPDK_CU_ASSERT_FATAL(server_sock[i] != NULL);
+
+ rc = spdk_sock_group_add_sock(group, server_sock[i],
+ read_data_fairness, server_sock[i]);
+ CU_ASSERT(rc == 0);
+ }
+
+ iov.iov_base = &test_char;
+ iov.iov_len = 1;
+
+ for (i = 0; i < 3; i++) {
+ bytes_written = spdk_sock_writev(client_sock[i], &iov, 1);
+ CU_ASSERT(bytes_written == 1);
+ }
+
+ usleep(1000);
+
+ /*
+ * Poll for just one event - this should be server sock 0, since that
+ * is the peer of the first client sock that we wrote to.
+ */
+ g_server_sock_read = NULL;
+ rc = spdk_sock_group_poll_count(group, 1);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_server_sock_read == server_sock[0]);
+
+ /*
+ * Now write another byte to client sock 0. We want to ensure that
+ * the sock group does not unfairly process the event for this sock
+ * before the socks that were written to earlier.
+ */
+ bytes_written = spdk_sock_writev(client_sock[0], &iov, 1);
+ CU_ASSERT(bytes_written == 1);
+
+ usleep(1000);
+
+ g_server_sock_read = NULL;
+ rc = spdk_sock_group_poll_count(group, 1);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_server_sock_read == server_sock[1]);
+
+ g_server_sock_read = NULL;
+ rc = spdk_sock_group_poll_count(group, 1);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_server_sock_read == server_sock[2]);
+
+ g_server_sock_read = NULL;
+ rc = spdk_sock_group_poll_count(group, 1);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(g_server_sock_read == server_sock[0]);
+
+ for (i = 0; i < 3; i++) {
+ rc = spdk_sock_group_remove_sock(group, server_sock[i]);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_sock_close(&client_sock[i]);
+ CU_ASSERT(client_sock[i] == NULL);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_sock_close(&server_sock[i]);
+ CU_ASSERT(server_sock[i] == NULL);
+ CU_ASSERT(rc == 0);
+ }
+
+ rc = spdk_sock_group_close(&group);
+ CU_ASSERT(group == NULL);
+ CU_ASSERT(rc == 0);
+
+ rc = spdk_sock_close(&listen_sock);
+ CU_ASSERT(listen_sock == NULL);
+ CU_ASSERT(rc == 0);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("sock", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "posix_sock", posix_sock) == NULL ||
+ CU_add_test(suite, "ut_sock", ut_sock) == NULL ||
+ CU_add_test(suite, "posix_sock_group", posix_sock_group) == NULL ||
+ CU_add_test(suite, "ut_sock_group", ut_sock_group) == NULL ||
+ CU_add_test(suite, "posix_sock_group_fairness", posix_sock_group_fairness) == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+
+ CU_basic_run_tests();
+
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/thread/Makefile b/src/spdk/test/unit/lib/thread/Makefile
new file mode 100644
index 00000000..d7381694
--- /dev/null
+++ b/src/spdk/test/unit/lib/thread/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = thread.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/thread/thread.c/.gitignore b/src/spdk/test/unit/lib/thread/thread.c/.gitignore
new file mode 100644
index 00000000..1a165acb
--- /dev/null
+++ b/src/spdk/test/unit/lib/thread/thread.c/.gitignore
@@ -0,0 +1 @@
+thread_ut
diff --git a/src/spdk/test/unit/lib/thread/thread.c/Makefile b/src/spdk/test/unit/lib/thread/thread.c/Makefile
new file mode 100644
index 00000000..23cfa45a
--- /dev/null
+++ b/src/spdk/test/unit/lib/thread/thread.c/Makefile
@@ -0,0 +1,41 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.mock.unittest.mk
+
+TEST_FILE = thread_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/thread/thread.c/thread_ut.c b/src/spdk/test/unit/lib/thread/thread.c/thread_ut.c
new file mode 100644
index 00000000..464e430f
--- /dev/null
+++ b/src/spdk/test/unit/lib/thread/thread.c/thread_ut.c
@@ -0,0 +1,501 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "thread/thread.c"
+#include "common/lib/test_env.c"
+#include "common/lib/ut_multithread.c"
+
+static void
+_send_msg(spdk_thread_fn fn, void *ctx, void *thread_ctx)
+{
+ fn(ctx);
+}
+
+static void
+thread_alloc(void)
+{
+ CU_ASSERT(TAILQ_EMPTY(&g_threads));
+ allocate_threads(1);
+ CU_ASSERT(!TAILQ_EMPTY(&g_threads));
+ free_threads();
+ CU_ASSERT(TAILQ_EMPTY(&g_threads));
+}
+
+static void
+send_msg_cb(void *ctx)
+{
+ bool *done = ctx;
+
+ *done = true;
+}
+
+static void
+thread_send_msg(void)
+{
+ struct spdk_thread *thread0;
+ bool done = false;
+
+ allocate_threads(2);
+ set_thread(0);
+ thread0 = spdk_get_thread();
+
+ set_thread(1);
+ /* Simulate thread 1 sending a message to thread 0. */
+ spdk_thread_send_msg(thread0, send_msg_cb, &done);
+
+ /* We have not polled thread 0 yet, so done should be false. */
+ CU_ASSERT(!done);
+
+ /*
+ * Poll thread 1. The message was sent to thread 0, so this should be
+ * a nop and done should still be false.
+ */
+ poll_thread(1);
+ CU_ASSERT(!done);
+
+ /*
+ * Poll thread 0. This should execute the message and done should then
+ * be true.
+ */
+ poll_thread(0);
+ CU_ASSERT(done);
+
+ free_threads();
+}
+
+static int
+poller_run_done(void *ctx)
+{
+ bool *poller_run = ctx;
+
+ *poller_run = true;
+
+ return -1;
+}
+
+static void
+thread_poller(void)
+{
+ struct spdk_poller *poller = NULL;
+ bool poller_run = false;
+
+ allocate_threads(1);
+
+ set_thread(0);
+ reset_time();
+ /* Register a poller with no-wait time and test execution */
+ poller = spdk_poller_register(poller_run_done, &poller_run, 0);
+ CU_ASSERT(poller != NULL);
+
+ poll_threads();
+ CU_ASSERT(poller_run == true);
+
+ spdk_poller_unregister(&poller);
+ CU_ASSERT(poller == NULL);
+
+ /* Register a poller with 1000us wait time and test single execution */
+ poller_run = false;
+ poller = spdk_poller_register(poller_run_done, &poller_run, 1000);
+ CU_ASSERT(poller != NULL);
+
+ poll_threads();
+ CU_ASSERT(poller_run == false);
+
+ increment_time(1000);
+ poll_threads();
+ CU_ASSERT(poller_run == true);
+
+ reset_time();
+ poller_run = false;
+ poll_threads();
+ CU_ASSERT(poller_run == false);
+
+ increment_time(1000);
+ poll_threads();
+ CU_ASSERT(poller_run == true);
+
+ spdk_poller_unregister(&poller);
+ CU_ASSERT(poller == NULL);
+
+ free_threads();
+}
+
+static void
+for_each_cb(void *ctx)
+{
+ int *count = ctx;
+
+ (*count)++;
+}
+
+static void
+thread_for_each(void)
+{
+ int count = 0;
+ int i;
+
+ allocate_threads(3);
+ set_thread(0);
+
+ spdk_for_each_thread(for_each_cb, &count, for_each_cb);
+
+ /* We have not polled thread 0 yet, so count should be 0 */
+ CU_ASSERT(count == 0);
+
+ /* Poll each thread to verify the message is passed to each */
+ for (i = 0; i < 3; i++) {
+ poll_thread(i);
+ CU_ASSERT(count == (i + 1));
+ }
+
+ /*
+ * After each thread is called, the completion calls it
+ * one more time.
+ */
+ poll_thread(0);
+ CU_ASSERT(count == 4);
+
+ free_threads();
+}
+
+static int
+channel_create(void *io_device, void *ctx_buf)
+{
+ return 0;
+}
+
+static void
+channel_destroy(void *io_device, void *ctx_buf)
+{
+}
+
+static void
+channel_msg(struct spdk_io_channel_iter *i)
+{
+ struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
+ int *count = spdk_io_channel_get_ctx(ch);
+
+ (*count)++;
+
+ spdk_for_each_channel_continue(i, 0);
+}
+
+static void
+channel_cpl(struct spdk_io_channel_iter *i, int status)
+{
+}
+
+static void
+for_each_channel_remove(void)
+{
+ struct spdk_io_channel *ch0, *ch1, *ch2;
+ int io_target;
+ int count = 0;
+
+ allocate_threads(3);
+ spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int), NULL);
+ set_thread(0);
+ ch0 = spdk_get_io_channel(&io_target);
+ set_thread(1);
+ ch1 = spdk_get_io_channel(&io_target);
+ set_thread(2);
+ ch2 = spdk_get_io_channel(&io_target);
+
+ /*
+ * Test that io_channel handles the case where we start to iterate through
+ * the channels, and during the iteration, one of the channels is deleted.
+ * This is done in some different and sometimes non-intuitive orders, because
+ * some operations are deferred and won't execute until their threads are
+ * polled.
+ *
+ * Case #1: Put the I/O channel before spdk_for_each_channel.
+ */
+ set_thread(0);
+ spdk_put_io_channel(ch0);
+ spdk_for_each_channel(&io_target, channel_msg, &count, channel_cpl);
+ poll_threads();
+
+ /*
+ * Case #2: Put the I/O channel after spdk_for_each_channel, but before
+ * thread 0 is polled.
+ */
+ ch0 = spdk_get_io_channel(&io_target);
+ spdk_for_each_channel(&io_target, channel_msg, &count, channel_cpl);
+ spdk_put_io_channel(ch0);
+ poll_threads();
+
+ set_thread(1);
+ spdk_put_io_channel(ch1);
+ set_thread(2);
+ spdk_put_io_channel(ch2);
+ spdk_io_device_unregister(&io_target, NULL);
+ poll_threads();
+
+ free_threads();
+}
+
+struct unreg_ctx {
+ bool ch_done;
+ bool foreach_done;
+};
+
+static void
+unreg_ch_done(struct spdk_io_channel_iter *i)
+{
+ struct unreg_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
+
+ ctx->ch_done = true;
+
+ SPDK_CU_ASSERT_FATAL(i->cur_thread != NULL);
+ spdk_for_each_channel_continue(i, 0);
+}
+
+static void
+unreg_foreach_done(struct spdk_io_channel_iter *i, int status)
+{
+ struct unreg_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
+
+ ctx->foreach_done = true;
+}
+
+static void
+for_each_channel_unreg(void)
+{
+ struct spdk_io_channel *ch0;
+ struct io_device *dev;
+ struct unreg_ctx ctx = {};
+ int io_target;
+
+ allocate_threads(1);
+ CU_ASSERT(TAILQ_EMPTY(&g_io_devices));
+ spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int), NULL);
+ CU_ASSERT(!TAILQ_EMPTY(&g_io_devices));
+ dev = TAILQ_FIRST(&g_io_devices);
+ SPDK_CU_ASSERT_FATAL(dev != NULL);
+ CU_ASSERT(TAILQ_NEXT(dev, tailq) == NULL);
+ set_thread(0);
+ ch0 = spdk_get_io_channel(&io_target);
+ spdk_for_each_channel(&io_target, unreg_ch_done, &ctx, unreg_foreach_done);
+
+ spdk_io_device_unregister(&io_target, NULL);
+ /*
+ * There is an outstanding foreach call on the io_device, so the unregister should not
+ * have removed the device.
+ */
+ CU_ASSERT(dev == TAILQ_FIRST(&g_io_devices));
+ spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int), NULL);
+ /*
+ * There is already a device registered at &io_target, so a new io_device should not
+ * have been added to g_io_devices.
+ */
+ CU_ASSERT(dev == TAILQ_FIRST(&g_io_devices));
+ CU_ASSERT(TAILQ_NEXT(dev, tailq) == NULL);
+
+ poll_thread(0);
+ CU_ASSERT(ctx.ch_done == true);
+ CU_ASSERT(ctx.foreach_done == true);
+ /*
+ * There are no more foreach operations outstanding, so we can unregister the device,
+ * even though a channel still exists for the device.
+ */
+ spdk_io_device_unregister(&io_target, NULL);
+ CU_ASSERT(TAILQ_EMPTY(&g_io_devices));
+
+ set_thread(0);
+ spdk_put_io_channel(ch0);
+
+ poll_threads();
+
+ free_threads();
+}
+
+static void
+thread_name(void)
+{
+ struct spdk_thread *thread;
+ const char *name;
+
+ /* Create thread with no name, which automatically generates one */
+ spdk_allocate_thread(_send_msg, NULL, NULL, NULL, NULL);
+ thread = spdk_get_thread();
+ SPDK_CU_ASSERT_FATAL(thread != NULL);
+ name = spdk_thread_get_name(thread);
+ CU_ASSERT(name != NULL);
+ spdk_free_thread();
+
+ /* Create thread named "test_thread" */
+ spdk_allocate_thread(_send_msg, NULL, NULL, NULL, "test_thread");
+ thread = spdk_get_thread();
+ SPDK_CU_ASSERT_FATAL(thread != NULL);
+ name = spdk_thread_get_name(thread);
+ SPDK_CU_ASSERT_FATAL(name != NULL);
+ CU_ASSERT(strcmp(name, "test_thread") == 0);
+ spdk_free_thread();
+}
+
+static uint64_t device1;
+static uint64_t device2;
+static uint64_t device3;
+
+static uint64_t ctx1 = 0x1111;
+static uint64_t ctx2 = 0x2222;
+
+static int g_create_cb_calls = 0;
+static int g_destroy_cb_calls = 0;
+
+static int
+create_cb_1(void *io_device, void *ctx_buf)
+{
+ CU_ASSERT(io_device == &device1);
+ *(uint64_t *)ctx_buf = ctx1;
+ g_create_cb_calls++;
+ return 0;
+}
+
+static void
+destroy_cb_1(void *io_device, void *ctx_buf)
+{
+ CU_ASSERT(io_device == &device1);
+ CU_ASSERT(*(uint64_t *)ctx_buf == ctx1);
+ g_destroy_cb_calls++;
+}
+
+static int
+create_cb_2(void *io_device, void *ctx_buf)
+{
+ CU_ASSERT(io_device == &device2);
+ *(uint64_t *)ctx_buf = ctx2;
+ g_create_cb_calls++;
+ return 0;
+}
+
+static void
+destroy_cb_2(void *io_device, void *ctx_buf)
+{
+ CU_ASSERT(io_device == &device2);
+ CU_ASSERT(*(uint64_t *)ctx_buf == ctx2);
+ g_destroy_cb_calls++;
+}
+
+static void
+channel(void)
+{
+ struct spdk_io_channel *ch1, *ch2;
+ void *ctx;
+
+ spdk_allocate_thread(_send_msg, NULL, NULL, NULL, "thread0");
+ spdk_io_device_register(&device1, create_cb_1, destroy_cb_1, sizeof(ctx1), NULL);
+ spdk_io_device_register(&device2, create_cb_2, destroy_cb_2, sizeof(ctx2), NULL);
+
+ g_create_cb_calls = 0;
+ ch1 = spdk_get_io_channel(&device1);
+ CU_ASSERT(g_create_cb_calls == 1);
+ SPDK_CU_ASSERT_FATAL(ch1 != NULL);
+
+ g_create_cb_calls = 0;
+ ch2 = spdk_get_io_channel(&device1);
+ CU_ASSERT(g_create_cb_calls == 0);
+ CU_ASSERT(ch1 == ch2);
+ SPDK_CU_ASSERT_FATAL(ch2 != NULL);
+
+ g_destroy_cb_calls = 0;
+ spdk_put_io_channel(ch2);
+ CU_ASSERT(g_destroy_cb_calls == 0);
+
+ g_create_cb_calls = 0;
+ ch2 = spdk_get_io_channel(&device2);
+ CU_ASSERT(g_create_cb_calls == 1);
+ CU_ASSERT(ch1 != ch2);
+ SPDK_CU_ASSERT_FATAL(ch2 != NULL);
+
+ ctx = spdk_io_channel_get_ctx(ch2);
+ CU_ASSERT(*(uint64_t *)ctx == ctx2);
+
+ g_destroy_cb_calls = 0;
+ spdk_put_io_channel(ch1);
+ CU_ASSERT(g_destroy_cb_calls == 1);
+
+ g_destroy_cb_calls = 0;
+ spdk_put_io_channel(ch2);
+ CU_ASSERT(g_destroy_cb_calls == 1);
+
+ ch1 = spdk_get_io_channel(&device3);
+ CU_ASSERT(ch1 == NULL);
+
+ spdk_io_device_unregister(&device1, NULL);
+ spdk_io_device_unregister(&device2, NULL);
+ CU_ASSERT(TAILQ_EMPTY(&g_io_devices));
+ spdk_free_thread();
+ CU_ASSERT(TAILQ_EMPTY(&g_threads));
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("io_channel", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "thread_alloc", thread_alloc) == NULL ||
+ CU_add_test(suite, "thread_send_msg", thread_send_msg) == NULL ||
+ CU_add_test(suite, "thread_poller", thread_poller) == NULL ||
+ CU_add_test(suite, "thread_for_each", thread_for_each) == NULL ||
+ CU_add_test(suite, "for_each_channel_remove", for_each_channel_remove) == NULL ||
+ CU_add_test(suite, "for_each_channel_unreg", for_each_channel_unreg) == NULL ||
+ CU_add_test(suite, "thread_name", thread_name) == NULL ||
+ CU_add_test(suite, "channel", channel) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/util/Makefile b/src/spdk/test/unit/lib/util/Makefile
new file mode 100644
index 00000000..4813e63b
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = base64.c bit_array.c cpuset.c crc16.c crc32_ieee.c crc32c.c string.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/util/base64.c/.gitignore b/src/spdk/test/unit/lib/util/base64.c/.gitignore
new file mode 100644
index 00000000..a5b17523
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/base64.c/.gitignore
@@ -0,0 +1 @@
+base64_ut
diff --git a/src/spdk/test/unit/lib/util/base64.c/Makefile b/src/spdk/test/unit/lib/util/base64.c/Makefile
new file mode 100644
index 00000000..ff6c9214
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/base64.c/Makefile
@@ -0,0 +1,40 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+
+TEST_FILE = base64_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/util/base64.c/base64_ut.c b/src/spdk/test/unit/lib/util/base64.c/base64_ut.c
new file mode 100644
index 00000000..652a1e94
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/base64.c/base64_ut.c
@@ -0,0 +1,268 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "util/base64.c"
+
+char text_A[] = "FZB3";
+uint8_t raw_A[] = {0x15, 0x90, 0x77};
+char text_B[] = "AbC/1+c=";
+char text_urlsafe_B[] = "AbC_1-c=";
+uint8_t raw_B[] = {0x01, 0xB0, 0xBF, 0xD7, 0xE7};
+char text_C[] = "AbC/1+cC";
+char text_urlsafe_C[] = "AbC_1-cC";
+uint8_t raw_C[] = {0x01, 0xB0, 0xBF, 0xD7, 0xE7, 0x02};
+char text_D[] = "AbC/1w==";
+char text_urlsafe_D[] = "AbC_1w==";
+uint8_t raw_D[] = {0x01, 0xB0, 0xBF, 0xD7};
+char text_E[] = "AbC12===";
+char text_F[] = "AbCd112";
+char text_G[] = "AbCd12";
+char text_H[] = "AbC12";
+
+static void
+test_base64_get_encoded_strlen(void)
+{
+ uint32_t raw_lens[4] = {8, 9, 10, 11};
+ uint32_t text_strlens[4] = {12, 12, 16, 16};
+ uint32_t text_strlen;
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ text_strlen = spdk_base64_get_encoded_strlen(raw_lens[i]);
+ CU_ASSERT_EQUAL(text_strlen, text_strlens[i]);
+ }
+}
+
+static void
+test_base64_get_decoded_len(void)
+{
+ uint32_t text_strlens[4] = {8, 10, 11, 12};
+ uint32_t raw_lens[4] = {6, 7, 8, 9};
+ uint32_t bin_len;
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ bin_len = spdk_base64_get_decoded_len(text_strlens[i]);
+ CU_ASSERT_EQUAL(bin_len, raw_lens[i]);
+ }
+}
+
+static void
+test_base64_encode(void)
+{
+ char text[100];
+ int ret;
+
+ ret = spdk_base64_encode(text, raw_A, sizeof(raw_A));
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT(strcmp(text, text_A) == 0);
+ CU_ASSERT_EQUAL(strlen(text), strlen(text_A));
+
+ ret = spdk_base64_encode(text, raw_B, sizeof(raw_B));
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT(strcmp(text, text_B) == 0);
+ CU_ASSERT_EQUAL(strlen(text), strlen(text_B));
+
+ ret = spdk_base64_encode(text, raw_C, sizeof(raw_C));
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT(strcmp(text, text_C) == 0);
+
+ ret = spdk_base64_encode(text, raw_D, sizeof(raw_D));
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT(strcmp(text, text_D) == 0);
+
+ ret = spdk_base64_encode(NULL, raw_A, sizeof(raw_A));
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+ ret = spdk_base64_encode(text, NULL, sizeof(raw_A));
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+ ret = spdk_base64_encode(text, raw_A, 0);
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+}
+
+static void
+test_base64_decode(void)
+{
+ char raw_buf[100];
+ void *raw = (void *)raw_buf;
+ size_t raw_len;
+ int ret;
+
+ ret = spdk_base64_decode(raw, &raw_len, text_A);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_A));
+ CU_ASSERT(memcmp(raw, raw_A, sizeof(raw_A)) == 0);
+
+ ret = spdk_base64_decode(raw, &raw_len, text_B);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_B));
+ CU_ASSERT(memcmp(raw, raw_B, sizeof(raw_B)) == 0);
+
+ ret = spdk_base64_decode(raw, &raw_len, text_C);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_C));
+ CU_ASSERT(memcmp(raw, raw_C, sizeof(raw_C)) == 0);
+
+ ret = spdk_base64_decode(raw, &raw_len, text_D);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_D));
+ CU_ASSERT(memcmp(raw, raw_D, sizeof(raw_D)) == 0);
+
+ ret = spdk_base64_decode(raw, &raw_len, text_E);
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+ ret = spdk_base64_decode(raw, &raw_len, text_F);
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+ ret = spdk_base64_decode(raw, &raw_len, text_G);
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+ ret = spdk_base64_decode(raw, &raw_len, text_H);
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+ ret = spdk_base64_decode(NULL, &raw_len, text_H);
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+ ret = spdk_base64_decode(raw, &raw_len, NULL);
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+}
+
+static void
+test_base64_urlsafe_encode(void)
+{
+ char text[100];
+ int ret;
+
+ ret = spdk_base64_urlsafe_encode(text, raw_A, sizeof(raw_A));
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT(strcmp(text, text_A) == 0);
+ CU_ASSERT_EQUAL(strlen(text), strlen(text_A));
+
+ ret = spdk_base64_urlsafe_encode(text, raw_B, sizeof(raw_B));
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT(strcmp(text, text_urlsafe_B) == 0);
+ CU_ASSERT_EQUAL(strlen(text), strlen(text_urlsafe_B));
+
+ ret = spdk_base64_urlsafe_encode(text, raw_C, sizeof(raw_C));
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT(strcmp(text, text_urlsafe_C) == 0);
+
+ ret = spdk_base64_urlsafe_encode(text, raw_D, sizeof(raw_D));
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT(strcmp(text, text_urlsafe_D) == 0);
+
+ ret = spdk_base64_urlsafe_encode(NULL, raw_A, sizeof(raw_A));
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+ ret = spdk_base64_urlsafe_encode(text, NULL, sizeof(raw_A));
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+ ret = spdk_base64_urlsafe_encode(text, raw_A, 0);
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+}
+
+static void
+test_base64_urlsafe_decode(void)
+{
+ char raw_buf[100];
+ void *raw = (void *)raw_buf;
+ size_t raw_len = 0;
+ int ret;
+
+ ret = spdk_base64_urlsafe_decode(raw, &raw_len, text_A);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_A));
+ CU_ASSERT(memcmp(raw, raw_A, sizeof(raw_A)) == 0);
+
+ ret = spdk_base64_urlsafe_decode(raw, &raw_len, text_urlsafe_B);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_B));
+ CU_ASSERT(memcmp(raw, raw_B, sizeof(raw_B)) == 0);
+
+ ret = spdk_base64_urlsafe_decode(raw, &raw_len, text_urlsafe_C);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_C));
+ CU_ASSERT(memcmp(raw, raw_C, sizeof(raw_C)) == 0);
+
+ ret = spdk_base64_urlsafe_decode(raw, &raw_len, text_urlsafe_D);
+ CU_ASSERT_EQUAL(ret, 0);
+ CU_ASSERT_EQUAL(raw_len, sizeof(raw_D));
+ CU_ASSERT(memcmp(raw, raw_D, sizeof(raw_D)) == 0);
+
+ ret = spdk_base64_urlsafe_decode(raw, &raw_len, text_E);
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+ ret = spdk_base64_urlsafe_decode(raw, &raw_len, text_F);
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+ ret = spdk_base64_urlsafe_decode(raw, &raw_len, text_G);
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+ ret = spdk_base64_urlsafe_decode(raw, &raw_len, text_H);
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+ ret = spdk_base64_urlsafe_decode(NULL, &raw_len, text_H);
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+ ret = spdk_base64_urlsafe_decode(raw, &raw_len, NULL);
+ CU_ASSERT_EQUAL(ret, -EINVAL);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("base64", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "test_base64_get_encoded_strlen", test_base64_get_encoded_strlen) == NULL ||
+ CU_add_test(suite, "test_base64_get_decoded_len",
+ test_base64_get_decoded_len) == NULL ||
+ CU_add_test(suite, "test_base64_encode", test_base64_encode) == NULL ||
+ CU_add_test(suite, "test_base64_decode", test_base64_decode) == NULL ||
+ CU_add_test(suite, "test_base64_urlsafe_encode", test_base64_urlsafe_encode) == NULL ||
+ CU_add_test(suite, "test_base64_urlsafe_decode", test_base64_urlsafe_decode) == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+
+ CU_basic_run_tests();
+
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/util/bit_array.c/.gitignore b/src/spdk/test/unit/lib/util/bit_array.c/.gitignore
new file mode 100644
index 00000000..24300cdb
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/bit_array.c/.gitignore
@@ -0,0 +1 @@
+bit_array_ut
diff --git a/src/spdk/test/unit/lib/util/bit_array.c/Makefile b/src/spdk/test/unit/lib/util/bit_array.c/Makefile
new file mode 100644
index 00000000..b7f8e3f6
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/bit_array.c/Makefile
@@ -0,0 +1,40 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+
+TEST_FILE = bit_array_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/util/bit_array.c/bit_array_ut.c b/src/spdk/test/unit/lib/util/bit_array.c/bit_array_ut.c
new file mode 100644
index 00000000..18d84b94
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/bit_array.c/bit_array_ut.c
@@ -0,0 +1,327 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "util/bit_array.c"
+
+void *
+spdk_dma_realloc(void *buf, size_t size, size_t align, uint64_t *phys_addr)
+{
+ return realloc(buf, size);
+}
+
+void
+spdk_dma_free(void *buf)
+{
+ free(buf);
+}
+
+static void
+test_1bit(void)
+{
+ struct spdk_bit_array *ba;
+
+ ba = spdk_bit_array_create(1);
+ SPDK_CU_ASSERT_FATAL(ba != NULL);
+ CU_ASSERT(spdk_bit_array_capacity(ba) == 1);
+
+ CU_ASSERT(spdk_bit_array_get(ba, 0) == false);
+ CU_ASSERT(spdk_bit_array_find_first_set(ba, 0) == UINT32_MAX);
+
+ /* Set bit 0 */
+ CU_ASSERT(spdk_bit_array_set(ba, 0) == 0);
+ CU_ASSERT(spdk_bit_array_get(ba, 0) == true);
+ CU_ASSERT(spdk_bit_array_find_first_set(ba, 0) == 0);
+
+ /* Clear bit 0 */
+ spdk_bit_array_clear(ba, 0);
+ CU_ASSERT(spdk_bit_array_get(ba, 0) == false);
+ CU_ASSERT(spdk_bit_array_find_first_set(ba, 0) == UINT32_MAX);
+
+ spdk_bit_array_free(&ba);
+ CU_ASSERT(ba == NULL);
+}
+
+static void
+test_64bit(void)
+{
+ struct spdk_bit_array *ba;
+
+ ba = spdk_bit_array_create(64);
+ SPDK_CU_ASSERT_FATAL(ba != NULL);
+ CU_ASSERT(spdk_bit_array_capacity(ba) == 64);
+ CU_ASSERT(spdk_bit_array_get(ba, 0) == false);
+ CU_ASSERT(spdk_bit_array_get(ba, 63) == false);
+ CU_ASSERT(spdk_bit_array_get(ba, 64) == false);
+ CU_ASSERT(spdk_bit_array_get(ba, 1000) == false);
+ CU_ASSERT(spdk_bit_array_find_first_set(ba, 0) == UINT32_MAX);
+
+ /* Set bit 1 */
+ CU_ASSERT(spdk_bit_array_set(ba, 1) == 0);
+ CU_ASSERT(spdk_bit_array_get(ba, 0) == false);
+ CU_ASSERT(spdk_bit_array_get(ba, 1) == true);
+ CU_ASSERT(spdk_bit_array_find_first_set(ba, 0) == 1);
+
+ /* Set bit 63 (1 still set) */
+ CU_ASSERT(spdk_bit_array_set(ba, 63) == 0);
+ CU_ASSERT(spdk_bit_array_get(ba, 0) == false);
+ CU_ASSERT(spdk_bit_array_get(ba, 1) == true);
+ CU_ASSERT(spdk_bit_array_get(ba, 63) == true);
+ CU_ASSERT(spdk_bit_array_find_first_set(ba, 0) == 1);
+
+ /* Clear bit 1 (63 still set) */
+ spdk_bit_array_clear(ba, 1);
+ CU_ASSERT(spdk_bit_array_get(ba, 1) == false);
+ CU_ASSERT(spdk_bit_array_find_first_set(ba, 0) == 63);
+
+ /* Clear bit 63 (no bits set) */
+ spdk_bit_array_clear(ba, 63);
+ CU_ASSERT(spdk_bit_array_get(ba, 63) == false);
+ CU_ASSERT(spdk_bit_array_find_first_set(ba, 0) == UINT32_MAX);
+
+ spdk_bit_array_free(&ba);
+}
+
+static void
+test_find(void)
+{
+ struct spdk_bit_array *ba;
+ uint32_t i;
+
+ ba = spdk_bit_array_create(256);
+ SPDK_CU_ASSERT_FATAL(ba != NULL);
+ CU_ASSERT(spdk_bit_array_capacity(ba) == 256);
+
+ /* Set all bits */
+ for (i = 0; i < 256; i++) {
+ CU_ASSERT(spdk_bit_array_set(ba, i) == 0);
+ }
+
+ /* Verify that find_first_set and find_first_clear work for each starting position */
+ for (i = 0; i < 256; i++) {
+ CU_ASSERT(spdk_bit_array_find_first_set(ba, i) == i);
+ CU_ASSERT(spdk_bit_array_find_first_clear(ba, i) == UINT32_MAX);
+ }
+ CU_ASSERT(spdk_bit_array_find_first_set(ba, 256) == UINT32_MAX);
+ CU_ASSERT(spdk_bit_array_find_first_clear(ba, 256) == UINT32_MAX);
+
+ /* Clear bits 0 through 31 */
+ for (i = 0; i < 32; i++) {
+ spdk_bit_array_clear(ba, i);
+ }
+
+ for (i = 0; i < 32; i++) {
+ CU_ASSERT(spdk_bit_array_find_first_set(ba, i) == 32);
+ CU_ASSERT(spdk_bit_array_find_first_clear(ba, i) == i);
+ }
+
+ for (i = 32; i < 256; i++) {
+ CU_ASSERT(spdk_bit_array_find_first_set(ba, i) == i);
+ CU_ASSERT(spdk_bit_array_find_first_clear(ba, i) == UINT32_MAX);
+ }
+
+ /* Clear bit 255 */
+ spdk_bit_array_clear(ba, 255);
+
+ for (i = 0; i < 32; i++) {
+ CU_ASSERT(spdk_bit_array_find_first_set(ba, i) == 32);
+ CU_ASSERT(spdk_bit_array_find_first_clear(ba, i) == i);
+ }
+
+ for (i = 32; i < 255; i++) {
+ CU_ASSERT(spdk_bit_array_find_first_set(ba, i) == i);
+ CU_ASSERT(spdk_bit_array_find_first_clear(ba, i) == 255);
+ }
+
+ CU_ASSERT(spdk_bit_array_find_first_clear(ba, 256) == UINT32_MAX);
+
+ spdk_bit_array_free(&ba);
+}
+
+static void
+test_resize(void)
+{
+ struct spdk_bit_array *ba;
+
+ /* Start with a 0 bit array */
+ ba = spdk_bit_array_create(0);
+ SPDK_CU_ASSERT_FATAL(ba != NULL);
+ CU_ASSERT(spdk_bit_array_capacity(ba) == 0);
+ CU_ASSERT(spdk_bit_array_get(ba, 0) == false);
+ CU_ASSERT(spdk_bit_array_set(ba, 0) == -EINVAL);
+ spdk_bit_array_clear(ba, 0);
+
+ /* Increase size to 1 bit */
+ SPDK_CU_ASSERT_FATAL(spdk_bit_array_resize(&ba, 1) == 0);
+ SPDK_CU_ASSERT_FATAL(ba != NULL);
+ CU_ASSERT(spdk_bit_array_capacity(ba) == 1);
+ CU_ASSERT(spdk_bit_array_get(ba, 0) == false);
+ CU_ASSERT(spdk_bit_array_set(ba, 0) == 0);
+ CU_ASSERT(spdk_bit_array_get(ba, 0) == true);
+
+ /* Increase size to 2 bits */
+ SPDK_CU_ASSERT_FATAL(spdk_bit_array_resize(&ba, 2) == 0);
+ SPDK_CU_ASSERT_FATAL(ba != NULL);
+ CU_ASSERT(spdk_bit_array_capacity(ba) == 2);
+ CU_ASSERT(spdk_bit_array_get(ba, 1) == false);
+ CU_ASSERT(spdk_bit_array_set(ba, 1) == 0);
+ CU_ASSERT(spdk_bit_array_get(ba, 1) == true);
+
+ /* Shrink size back to 1 bit */
+ SPDK_CU_ASSERT_FATAL(spdk_bit_array_resize(&ba, 1) == 0);
+ SPDK_CU_ASSERT_FATAL(ba != NULL);
+ CU_ASSERT(spdk_bit_array_capacity(ba) == 1);
+ CU_ASSERT(spdk_bit_array_get(ba, 0) == true);
+ CU_ASSERT(spdk_bit_array_get(ba, 1) == false);
+
+ /* Increase size to 65 bits */
+ SPDK_CU_ASSERT_FATAL(spdk_bit_array_resize(&ba, 65) == 0);
+ SPDK_CU_ASSERT_FATAL(ba != NULL);
+ CU_ASSERT(spdk_bit_array_capacity(ba) == 65);
+ CU_ASSERT(spdk_bit_array_get(ba, 0) == true);
+ CU_ASSERT(spdk_bit_array_get(ba, 1) == false);
+ CU_ASSERT(spdk_bit_array_set(ba, 64) == 0);
+ CU_ASSERT(spdk_bit_array_get(ba, 64) == true);
+
+ /* Shrink size back to 0 bits */
+ SPDK_CU_ASSERT_FATAL(spdk_bit_array_resize(&ba, 0) == 0);
+ SPDK_CU_ASSERT_FATAL(ba != NULL);
+ CU_ASSERT(spdk_bit_array_capacity(ba) == 0);
+ CU_ASSERT(spdk_bit_array_get(ba, 0) == false);
+ CU_ASSERT(spdk_bit_array_get(ba, 1) == false);
+
+ spdk_bit_array_free(&ba);
+}
+
+static void
+test_errors(void)
+{
+ /* Passing NULL to resize should fail. */
+ CU_ASSERT(spdk_bit_array_resize(NULL, 0) == -EINVAL);
+
+ /* Passing NULL to free is a no-op. */
+ spdk_bit_array_free(NULL);
+}
+
+static void
+test_count(void)
+{
+ struct spdk_bit_array *ba;
+ uint32_t i;
+
+ /* 0-bit array should have 0 bits set and 0 bits clear */
+ ba = spdk_bit_array_create(0);
+ SPDK_CU_ASSERT_FATAL(ba != NULL);
+ CU_ASSERT(spdk_bit_array_count_set(ba) == 0);
+ CU_ASSERT(spdk_bit_array_count_clear(ba) == 0);
+ spdk_bit_array_free(&ba);
+
+ /* 1-bit array */
+ ba = spdk_bit_array_create(1);
+ SPDK_CU_ASSERT_FATAL(ba != NULL);
+ CU_ASSERT(spdk_bit_array_count_set(ba) == 0);
+ CU_ASSERT(spdk_bit_array_count_clear(ba) == 1);
+ spdk_bit_array_set(ba, 0);
+ CU_ASSERT(spdk_bit_array_count_set(ba) == 1);
+ CU_ASSERT(spdk_bit_array_count_clear(ba) == 0);
+ spdk_bit_array_free(&ba);
+
+ /* 65-bit array */
+ ba = spdk_bit_array_create(65);
+ SPDK_CU_ASSERT_FATAL(ba != NULL);
+ CU_ASSERT(spdk_bit_array_count_set(ba) == 0);
+ CU_ASSERT(spdk_bit_array_count_clear(ba) == 65);
+ spdk_bit_array_set(ba, 0);
+ CU_ASSERT(spdk_bit_array_count_set(ba) == 1);
+ CU_ASSERT(spdk_bit_array_count_clear(ba) == 64);
+ spdk_bit_array_set(ba, 5);
+ CU_ASSERT(spdk_bit_array_count_set(ba) == 2);
+ CU_ASSERT(spdk_bit_array_count_clear(ba) == 63);
+ spdk_bit_array_set(ba, 13);
+ CU_ASSERT(spdk_bit_array_count_set(ba) == 3);
+ CU_ASSERT(spdk_bit_array_count_clear(ba) == 62);
+ spdk_bit_array_clear(ba, 0);
+ CU_ASSERT(spdk_bit_array_count_set(ba) == 2);
+ CU_ASSERT(spdk_bit_array_count_clear(ba) == 63);
+ for (i = 0; i < 65; i++) {
+ spdk_bit_array_set(ba, i);
+ }
+ CU_ASSERT(spdk_bit_array_count_set(ba) == 65);
+ CU_ASSERT(spdk_bit_array_count_clear(ba) == 0);
+ for (i = 0; i < 65; i++) {
+ spdk_bit_array_clear(ba, i);
+ CU_ASSERT(spdk_bit_array_count_set(ba) == 65 - i - 1);
+ CU_ASSERT(spdk_bit_array_count_clear(ba) == i + 1);
+ }
+ spdk_bit_array_free(&ba);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("bit_array", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "test_1bit", test_1bit) == NULL ||
+ CU_add_test(suite, "test_64bit", test_64bit) == NULL ||
+ CU_add_test(suite, "test_find", test_find) == NULL ||
+ CU_add_test(suite, "test_resize", test_resize) == NULL ||
+ CU_add_test(suite, "test_errors", test_errors) == NULL ||
+ CU_add_test(suite, "test_count", test_count) == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+
+ CU_basic_run_tests();
+
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/util/cpuset.c/.gitignore b/src/spdk/test/unit/lib/util/cpuset.c/.gitignore
new file mode 100644
index 00000000..2ca1a2d3
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/cpuset.c/.gitignore
@@ -0,0 +1 @@
+cpuset_ut
diff --git a/src/spdk/test/unit/lib/util/cpuset.c/Makefile b/src/spdk/test/unit/lib/util/cpuset.c/Makefile
new file mode 100644
index 00000000..da7a1400
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/cpuset.c/Makefile
@@ -0,0 +1,40 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+
+TEST_FILE = cpuset_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/util/cpuset.c/cpuset_ut.c b/src/spdk/test/unit/lib/util/cpuset.c/cpuset_ut.c
new file mode 100644
index 00000000..6fea0ad3
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/cpuset.c/cpuset_ut.c
@@ -0,0 +1,265 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+#include "spdk/cpuset.h"
+
+#include "spdk_cunit.h"
+
+#include "util/cpuset.c"
+
+static int
+cpuset_check_range(struct spdk_cpuset *core_mask, uint32_t min, uint32_t max, bool isset)
+{
+ uint32_t core;
+ for (core = min; core <= max; core++) {
+ if (isset != spdk_cpuset_get_cpu(core_mask, core)) {
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static void
+test_cpuset(void)
+{
+ uint32_t cpu;
+ struct spdk_cpuset *set = spdk_cpuset_alloc();
+
+ SPDK_CU_ASSERT_FATAL(set != NULL);
+ CU_ASSERT(spdk_cpuset_count(set) == 0);
+
+ /* Set cpu 0 */
+ spdk_cpuset_set_cpu(set, 0, true);
+ CU_ASSERT(spdk_cpuset_get_cpu(set, 0) == true);
+ CU_ASSERT(cpuset_check_range(set, 1, SPDK_CPUSET_SIZE - 1, false) == 0);
+ CU_ASSERT(spdk_cpuset_count(set) == 1);
+
+ /* Set last cpu (cpu 0 already set) */
+ spdk_cpuset_set_cpu(set, SPDK_CPUSET_SIZE - 1, true);
+ CU_ASSERT(spdk_cpuset_get_cpu(set, 0) == true);
+ CU_ASSERT(spdk_cpuset_get_cpu(set, SPDK_CPUSET_SIZE - 1) == true);
+ CU_ASSERT(cpuset_check_range(set, 1, SPDK_CPUSET_SIZE - 2, false) == 0);
+ CU_ASSERT(spdk_cpuset_count(set) == 2);
+
+ /* Clear cpu 0 (last cpu already set) */
+ spdk_cpuset_set_cpu(set, 0, false);
+ CU_ASSERT(spdk_cpuset_get_cpu(set, 0) == false);
+ CU_ASSERT(cpuset_check_range(set, 1, SPDK_CPUSET_SIZE - 2, false) == 0);
+ CU_ASSERT(spdk_cpuset_get_cpu(set, SPDK_CPUSET_SIZE - 1) == true);
+ CU_ASSERT(spdk_cpuset_count(set) == 1);
+
+ /* Set middle cpu (last cpu already set) */
+ cpu = (SPDK_CPUSET_SIZE - 1) / 2;
+ spdk_cpuset_set_cpu(set, cpu, true);
+ CU_ASSERT(spdk_cpuset_get_cpu(set, cpu) == true);
+ CU_ASSERT(spdk_cpuset_get_cpu(set, SPDK_CPUSET_SIZE - 1) == true);
+ CU_ASSERT(cpuset_check_range(set, 1, cpu - 1, false) == 0);
+ CU_ASSERT(cpuset_check_range(set, cpu + 1, SPDK_CPUSET_SIZE - 2, false) == 0);
+ CU_ASSERT(spdk_cpuset_count(set) == 2);
+
+ /* Set all cpus */
+ for (cpu = 0; cpu < SPDK_CPUSET_SIZE; cpu++) {
+ spdk_cpuset_set_cpu(set, cpu, true);
+ }
+ CU_ASSERT(cpuset_check_range(set, 0, SPDK_CPUSET_SIZE - 1, true) == 0);
+ CU_ASSERT(spdk_cpuset_count(set) == SPDK_CPUSET_SIZE);
+
+ /* Clear all cpus */
+ spdk_cpuset_zero(set);
+ CU_ASSERT(cpuset_check_range(set, 0, SPDK_CPUSET_SIZE - 1, false) == 0);
+ CU_ASSERT(spdk_cpuset_count(set) == 0);
+
+ spdk_cpuset_free(set);
+}
+
+static void
+test_cpuset_parse(void)
+{
+ int rc;
+ struct spdk_cpuset *core_mask;
+ char buf[1024];
+
+ core_mask = spdk_cpuset_alloc();
+ SPDK_CU_ASSERT_FATAL(core_mask != NULL);
+
+ /* Only core 0 should be set */
+ rc = spdk_cpuset_parse(core_mask, "0x1");
+ CU_ASSERT(rc >= 0);
+ CU_ASSERT(cpuset_check_range(core_mask, 0, 0, true) == 0);
+ CU_ASSERT(cpuset_check_range(core_mask, 1, SPDK_CPUSET_SIZE - 1, false) == 0);
+
+ /* Only core 1 should be set */
+ rc = spdk_cpuset_parse(core_mask, "[1]");
+ CU_ASSERT(rc >= 0);
+ CU_ASSERT(cpuset_check_range(core_mask, 0, 0, false) == 0);
+ CU_ASSERT(cpuset_check_range(core_mask, 1, 1, true) == 0);
+ CU_ASSERT(cpuset_check_range(core_mask, 2, SPDK_CPUSET_SIZE - 1, false) == 0);
+
+ /* Set cores 0-10,12,128-254 */
+ rc = spdk_cpuset_parse(core_mask, "[0-10,12,128-254]");
+ CU_ASSERT(rc >= 0);
+ CU_ASSERT(cpuset_check_range(core_mask, 0, 10, true) == 0);
+ CU_ASSERT(cpuset_check_range(core_mask, 11, 11, false) == 0);
+ CU_ASSERT(cpuset_check_range(core_mask, 12, 12, true) == 0);
+ CU_ASSERT(cpuset_check_range(core_mask, 13, 127, false) == 0);
+ CU_ASSERT(cpuset_check_range(core_mask, 128, 254, true) == 0);
+ CU_ASSERT(cpuset_check_range(core_mask, 255, SPDK_CPUSET_SIZE - 1, false) == 0);
+
+ /* Set all cores */
+ snprintf(buf, sizeof(buf), "[0-%d]", SPDK_CPUSET_SIZE - 1);
+ rc = spdk_cpuset_parse(core_mask, buf);
+ CU_ASSERT(rc >= 0);
+ CU_ASSERT(cpuset_check_range(core_mask, 0, SPDK_CPUSET_SIZE - 1, true) == 0);
+
+ /* Null parameters not allowed */
+ rc = spdk_cpuset_parse(core_mask, NULL);
+ CU_ASSERT(rc < 0);
+
+ rc = spdk_cpuset_parse(NULL, "[1]");
+ CU_ASSERT(rc < 0);
+
+ /* Wrong formated core lists */
+ rc = spdk_cpuset_parse(core_mask, "");
+ CU_ASSERT(rc < 0);
+
+ rc = spdk_cpuset_parse(core_mask, "[");
+ CU_ASSERT(rc < 0);
+
+ rc = spdk_cpuset_parse(core_mask, "[]");
+ CU_ASSERT(rc < 0);
+
+ rc = spdk_cpuset_parse(core_mask, "[10--11]");
+ CU_ASSERT(rc < 0);
+
+ rc = spdk_cpuset_parse(core_mask, "[11-10]");
+ CU_ASSERT(rc < 0);
+
+ rc = spdk_cpuset_parse(core_mask, "[10-11,]");
+ CU_ASSERT(rc < 0);
+
+ rc = spdk_cpuset_parse(core_mask, "[,10-11]");
+ CU_ASSERT(rc < 0);
+
+ /* Out of range value */
+ snprintf(buf, sizeof(buf), "[%d]", SPDK_CPUSET_SIZE + 1);
+ rc = spdk_cpuset_parse(core_mask, buf);
+ CU_ASSERT(rc < 0);
+
+ /* Overflow value (UINT64_MAX * 10) */
+ rc = spdk_cpuset_parse(core_mask, "[184467440737095516150]");
+ CU_ASSERT(rc < 0);
+
+ spdk_cpuset_free(core_mask);
+}
+
+static void
+test_cpuset_fmt(void)
+{
+ int i;
+ uint32_t lcore;
+ struct spdk_cpuset *core_mask = spdk_cpuset_alloc();
+ const char *hex_mask;
+ char hex_mask_ref[SPDK_CPUSET_SIZE / 4 + 1];
+
+ /* Clear coremask. hex_mask should be "0" */
+ spdk_cpuset_zero(core_mask);
+ hex_mask = spdk_cpuset_fmt(core_mask);
+ SPDK_CU_ASSERT_FATAL(hex_mask != NULL);
+ CU_ASSERT(strcmp("0", hex_mask) == 0);
+
+ /* Set coremask 0x51234. Result should be "51234" */
+ spdk_cpuset_zero(core_mask);
+ spdk_cpuset_set_cpu(core_mask, 2, true);
+ spdk_cpuset_set_cpu(core_mask, 4, true);
+ spdk_cpuset_set_cpu(core_mask, 5, true);
+ spdk_cpuset_set_cpu(core_mask, 9, true);
+ spdk_cpuset_set_cpu(core_mask, 12, true);
+ spdk_cpuset_set_cpu(core_mask, 16, true);
+ spdk_cpuset_set_cpu(core_mask, 18, true);
+ hex_mask = spdk_cpuset_fmt(core_mask);
+ SPDK_CU_ASSERT_FATAL(hex_mask != NULL);
+ CU_ASSERT(strcmp("51234", hex_mask) == 0);
+
+ /* Set all cores */
+ spdk_cpuset_zero(core_mask);
+ for (lcore = 0; lcore < SPDK_CPUSET_SIZE; lcore++) {
+ spdk_cpuset_set_cpu(core_mask, lcore, true);
+ }
+ for (i = 0; i < SPDK_CPUSET_SIZE / 4 - 1; i++) {
+ hex_mask_ref[i] = 'f';
+ }
+ hex_mask_ref[SPDK_CPUSET_SIZE / 4 - 1] = '\0';
+
+ hex_mask = spdk_cpuset_fmt(core_mask);
+ CU_ASSERT(hex_mask != NULL);
+ if (hex_mask != NULL) {
+ CU_ASSERT(strcmp(hex_mask_ref, hex_mask) == 0);
+ }
+
+ spdk_cpuset_free(core_mask);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("cpuset", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "test_cpuset", test_cpuset) == NULL ||
+ CU_add_test(suite, "test_cpuset_parse", test_cpuset_parse) == NULL ||
+ CU_add_test(suite, "test_cpuset_fmt", test_cpuset_fmt) == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+
+ CU_basic_run_tests();
+
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/util/crc16.c/.gitignore b/src/spdk/test/unit/lib/util/crc16.c/.gitignore
new file mode 100644
index 00000000..d026adf0
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/crc16.c/.gitignore
@@ -0,0 +1 @@
+crc16_ut
diff --git a/src/spdk/test/unit/lib/util/crc16.c/Makefile b/src/spdk/test/unit/lib/util/crc16.c/Makefile
new file mode 100644
index 00000000..6b8b2ad4
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/crc16.c/Makefile
@@ -0,0 +1,40 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+
+TEST_FILE = crc16_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/util/crc16.c/crc16_ut.c b/src/spdk/test/unit/lib/util/crc16.c/crc16_ut.c
new file mode 100644
index 00000000..8b05e900
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/crc16.c/crc16_ut.c
@@ -0,0 +1,80 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "util/crc16.c"
+
+static void
+test_crc16_t10dif(void)
+{
+ uint16_t crc;
+ char buf[] = "123456789";
+
+ crc = spdk_crc16_t10dif(buf, strlen(buf));
+ CU_ASSERT(crc == 0xd0db);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("crc16", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "test_crc16_t10dif", test_crc16_t10dif) == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+
+ CU_basic_run_tests();
+
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/util/crc32_ieee.c/.gitignore b/src/spdk/test/unit/lib/util/crc32_ieee.c/.gitignore
new file mode 100644
index 00000000..40a85a93
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/crc32_ieee.c/.gitignore
@@ -0,0 +1 @@
+crc32_ieee_ut
diff --git a/src/spdk/test/unit/lib/util/crc32_ieee.c/Makefile b/src/spdk/test/unit/lib/util/crc32_ieee.c/Makefile
new file mode 100644
index 00000000..000e1ba6
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/crc32_ieee.c/Makefile
@@ -0,0 +1,40 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+
+TEST_FILE = crc32_ieee_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/util/crc32_ieee.c/crc32_ieee_ut.c b/src/spdk/test/unit/lib/util/crc32_ieee.c/crc32_ieee_ut.c
new file mode 100644
index 00000000..9a076998
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/crc32_ieee.c/crc32_ieee_ut.c
@@ -0,0 +1,83 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "util/crc32.c"
+#include "util/crc32_ieee.c"
+
+static void
+test_crc32_ieee(void)
+{
+ uint32_t crc;
+ char buf[] = "Hello world!";
+
+ crc = 0xFFFFFFFFu;
+ crc = spdk_crc32_ieee_update(buf, strlen(buf), crc);
+ crc ^= 0xFFFFFFFFu;
+ CU_ASSERT(crc == 0x1b851995);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("crc32_ieee", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "test_crc32_ieee", test_crc32_ieee) == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+
+ CU_basic_run_tests();
+
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/util/crc32c.c/.gitignore b/src/spdk/test/unit/lib/util/crc32c.c/.gitignore
new file mode 100644
index 00000000..55bedec7
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/crc32c.c/.gitignore
@@ -0,0 +1 @@
+crc32c_ut
diff --git a/src/spdk/test/unit/lib/util/crc32c.c/Makefile b/src/spdk/test/unit/lib/util/crc32c.c/Makefile
new file mode 100644
index 00000000..eba81722
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/crc32c.c/Makefile
@@ -0,0 +1,40 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+
+TEST_FILE = crc32c_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/util/crc32c.c/crc32c_ut.c b/src/spdk/test/unit/lib/util/crc32c.c/crc32c_ut.c
new file mode 100644
index 00000000..49b2f852
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/crc32c.c/crc32c_ut.c
@@ -0,0 +1,154 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "util/crc32.c"
+#include "util/crc32c.c"
+
+static void
+test_crc32c(void)
+{
+ uint32_t crc;
+ char buf[1024];
+
+ /* Verify a string's CRC32-C value against the known correct result. */
+ snprintf(buf, sizeof(buf), "%s", "Hello world!");
+ crc = 0xFFFFFFFFu;
+ crc = spdk_crc32c_update(buf, strlen(buf), crc);
+ crc ^= 0xFFFFFFFFu;
+ CU_ASSERT(crc == 0x7b98e751);
+
+ /*
+ * The main loop of the optimized CRC32-C implementation processes data in 8-byte blocks,
+ * followed by a loop to handle the 0-7 trailing bytes.
+ * Test all buffer sizes from 0 to 7 in order to hit all possible trailing byte counts.
+ */
+
+ /* 0-byte buffer should not modify CRC at all, so final result should be ~0 ^ ~0 == 0 */
+ snprintf(buf, sizeof(buf), "%s", "");
+ crc = 0xFFFFFFFFu;
+ crc = spdk_crc32c_update(buf, strlen(buf), crc);
+ crc ^= 0xFFFFFFFFu;
+ CU_ASSERT(crc == 0);
+
+ /* 1-byte buffer */
+ snprintf(buf, sizeof(buf), "%s", "1");
+ crc = 0xFFFFFFFFu;
+ crc = spdk_crc32c_update(buf, strlen(buf), crc);
+ crc ^= 0xFFFFFFFFu;
+ CU_ASSERT(crc == 0x90F599E3);
+
+ /* 2-byte buffer */
+ snprintf(buf, sizeof(buf), "%s", "12");
+ crc = 0xFFFFFFFFu;
+ crc = spdk_crc32c_update(buf, strlen(buf), crc);
+ crc ^= 0xFFFFFFFFu;
+ CU_ASSERT(crc == 0x7355C460);
+
+ /* 3-byte buffer */
+ snprintf(buf, sizeof(buf), "%s", "123");
+ crc = 0xFFFFFFFFu;
+ crc = spdk_crc32c_update(buf, strlen(buf), crc);
+ crc ^= 0xFFFFFFFFu;
+ CU_ASSERT(crc == 0x107B2FB2);
+
+ /* 4-byte buffer */
+ snprintf(buf, sizeof(buf), "%s", "1234");
+ crc = 0xFFFFFFFFu;
+ crc = spdk_crc32c_update(buf, strlen(buf), crc);
+ crc ^= 0xFFFFFFFFu;
+ CU_ASSERT(crc == 0xF63AF4EE);
+
+ /* 5-byte buffer */
+ snprintf(buf, sizeof(buf), "%s", "12345");
+ crc = 0xFFFFFFFFu;
+ crc = spdk_crc32c_update(buf, strlen(buf), crc);
+ crc ^= 0xFFFFFFFFu;
+ CU_ASSERT(crc == 0x18D12335);
+
+ /* 6-byte buffer */
+ snprintf(buf, sizeof(buf), "%s", "123456");
+ crc = 0xFFFFFFFFu;
+ crc = spdk_crc32c_update(buf, strlen(buf), crc);
+ crc ^= 0xFFFFFFFFu;
+ CU_ASSERT(crc == 0x41357186);
+
+ /* 7-byte buffer */
+ snprintf(buf, sizeof(buf), "%s", "1234567");
+ crc = 0xFFFFFFFFu;
+ crc = spdk_crc32c_update(buf, strlen(buf), crc);
+ crc ^= 0xFFFFFFFFu;
+ CU_ASSERT(crc == 0x124297EA);
+
+ /* Test a buffer of exactly 8 bytes (one block in the main CRC32-C loop). */
+ snprintf(buf, sizeof(buf), "%s", "12345678");
+ crc = 0xFFFFFFFFu;
+ crc = spdk_crc32c_update(buf, strlen(buf), crc);
+ crc ^= 0xFFFFFFFFu;
+ CU_ASSERT(crc == 0x6087809A);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("crc32c", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "test_crc32c", test_crc32c) == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+
+ CU_basic_run_tests();
+
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/util/string.c/.gitignore b/src/spdk/test/unit/lib/util/string.c/.gitignore
new file mode 100644
index 00000000..5d85d4d9
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/string.c/.gitignore
@@ -0,0 +1 @@
+string_ut
diff --git a/src/spdk/test/unit/lib/util/string.c/Makefile b/src/spdk/test/unit/lib/util/string.c/Makefile
new file mode 100644
index 00000000..8ee11909
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/string.c/Makefile
@@ -0,0 +1,40 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+
+TEST_FILE = string_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/util/string.c/string_ut.c b/src/spdk/test/unit/lib/util/string.c/string_ut.c
new file mode 100644
index 00000000..2ca32cbe
--- /dev/null
+++ b/src/spdk/test/unit/lib/util/string.c/string_ut.c
@@ -0,0 +1,237 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "spdk_cunit.h"
+
+#include "util/string.c"
+
+static void
+test_parse_ip_addr(void)
+{
+ int rc;
+ char *host;
+ char *port;
+ char ip[255];
+
+ /* IPv4 */
+ snprintf(ip, 255, "%s", "192.168.0.1");
+ rc = spdk_parse_ip_addr(ip, &host, &port);
+ CU_ASSERT_EQUAL(rc, 0);
+ SPDK_CU_ASSERT_FATAL(host != NULL);
+ CU_ASSERT(strcmp(host, "192.168.0.1") == 0);
+ CU_ASSERT_EQUAL(strlen(host), 11);
+ CU_ASSERT_EQUAL(port, NULL);
+
+ /* IPv4 with port */
+ snprintf(ip, 255, "%s", "123.456.789.0:5520");
+ rc = spdk_parse_ip_addr(ip, &host, &port);
+ CU_ASSERT_EQUAL(rc, 0);
+ SPDK_CU_ASSERT_FATAL(host != NULL);
+ CU_ASSERT(strcmp(host, "123.456.789.0") == 0);
+ CU_ASSERT_EQUAL(strlen(host), 13);
+ SPDK_CU_ASSERT_FATAL(port != NULL);
+ CU_ASSERT(strcmp(port, "5520") == 0);
+ CU_ASSERT_EQUAL(strlen(port), 4);
+
+ /* IPv6 */
+ snprintf(ip, 255, "%s", "[2001:db8:85a3:8d3:1319:8a2e:370:7348]");
+ rc = spdk_parse_ip_addr(ip, &host, &port);
+ CU_ASSERT_EQUAL(rc, 0);
+ SPDK_CU_ASSERT_FATAL(host != NULL);
+ CU_ASSERT(strcmp(host, "2001:db8:85a3:8d3:1319:8a2e:370:7348") == 0);
+ CU_ASSERT_EQUAL(strlen(host), 36);
+ CU_ASSERT_EQUAL(port, NULL);
+
+ /* IPv6 with port */
+ snprintf(ip, 255, "%s", "[2001:db8:85a3:8d3:1319:8a2e:370:7348]:443");
+ rc = spdk_parse_ip_addr(ip, &host, &port);
+ CU_ASSERT_EQUAL(rc, 0);
+ SPDK_CU_ASSERT_FATAL(host != NULL);
+ CU_ASSERT(strcmp(host, "2001:db8:85a3:8d3:1319:8a2e:370:7348") == 0);
+ CU_ASSERT_EQUAL(strlen(host), 36);
+ SPDK_CU_ASSERT_FATAL(port != NULL);
+ CU_ASSERT(strcmp(port, "443") == 0);
+ CU_ASSERT_EQUAL(strlen(port), 3);
+
+ /* IPv6 dangling colon */
+ snprintf(ip, 255, "%s", "[2001:db8:85a3:8d3:1319:8a2e:370:7348]:");
+ rc = spdk_parse_ip_addr(ip, &host, &port);
+ CU_ASSERT_EQUAL(rc, 0);
+ SPDK_CU_ASSERT_FATAL(host != NULL);
+ CU_ASSERT(strcmp(host, "2001:db8:85a3:8d3:1319:8a2e:370:7348") == 0);
+ CU_ASSERT_EQUAL(strlen(host), 36);
+ CU_ASSERT_EQUAL(port, NULL);
+}
+
+static void
+test_str_chomp(void)
+{
+ char s[1024];
+
+ /* One \n newline */
+ snprintf(s, sizeof(s), "%s", "hello world\n");
+ CU_ASSERT(spdk_str_chomp(s) == 1);
+ CU_ASSERT(strcmp(s, "hello world") == 0);
+
+ /* One \r\n newline */
+ snprintf(s, sizeof(s), "%s", "hello world\r\n");
+ CU_ASSERT(spdk_str_chomp(s) == 2);
+ CU_ASSERT(strcmp(s, "hello world") == 0);
+
+ /* No newlines */
+ snprintf(s, sizeof(s), "%s", "hello world");
+ CU_ASSERT(spdk_str_chomp(s) == 0);
+ CU_ASSERT(strcmp(s, "hello world") == 0);
+
+ /* Two newlines */
+ snprintf(s, sizeof(s), "%s", "hello world\n\n");
+ CU_ASSERT(spdk_str_chomp(s) == 2);
+ CU_ASSERT(strcmp(s, "hello world") == 0);
+
+ /* Empty string */
+ snprintf(s, sizeof(s), "%s", "");
+ CU_ASSERT(spdk_str_chomp(s) == 0);
+ CU_ASSERT(strcmp(s, "") == 0);
+
+ /* One-character string with only \n */
+ snprintf(s, sizeof(s), "%s", "\n");
+ CU_ASSERT(spdk_str_chomp(s) == 1);
+ CU_ASSERT(strcmp(s, "") == 0);
+
+ /* One-character string without a newline */
+ snprintf(s, sizeof(s), "%s", "a");
+ CU_ASSERT(spdk_str_chomp(s) == 0);
+ CU_ASSERT(strcmp(s, "a") == 0);
+}
+
+static void
+test_parse_capacity(void)
+{
+ char str[128];
+ uint64_t cap;
+ int rc;
+ bool has_prefix = true;
+
+ rc = spdk_parse_capacity("472", &cap, &has_prefix);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cap == 472);
+ CU_ASSERT(has_prefix == false);
+
+ snprintf(str, sizeof(str), "%"PRIu64, UINT64_MAX);
+ rc = spdk_parse_capacity(str, &cap, &has_prefix);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cap == UINT64_MAX);
+ CU_ASSERT(has_prefix == false);
+
+ rc = spdk_parse_capacity("12k", &cap, &has_prefix);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cap == 12 * 1024);
+ CU_ASSERT(has_prefix == true);
+
+ rc = spdk_parse_capacity("12K", &cap, &has_prefix);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cap == 12 * 1024);
+ CU_ASSERT(has_prefix == true);
+
+ rc = spdk_parse_capacity("12KB", &cap, &has_prefix);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cap == 12 * 1024);
+ CU_ASSERT(has_prefix == true);
+
+ rc = spdk_parse_capacity("100M", &cap, &has_prefix);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cap == 100 * 1024 * 1024);
+ CU_ASSERT(has_prefix == true);
+
+ rc = spdk_parse_capacity("128M", &cap, &has_prefix);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cap == 128 * 1024 * 1024);
+ CU_ASSERT(has_prefix == true);
+
+ rc = spdk_parse_capacity("4G", &cap, &has_prefix);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cap == 4ULL * 1024 * 1024 * 1024);
+ CU_ASSERT(has_prefix == true);
+
+ rc = spdk_parse_capacity("100M 512k", &cap, &has_prefix);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cap == 100ULL * 1024 * 1024);
+
+ rc = spdk_parse_capacity("12k8K", &cap, &has_prefix);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(cap == 12 * 1024);
+ CU_ASSERT(has_prefix == true);
+
+ /* Non-number */
+ rc = spdk_parse_capacity("G", &cap, &has_prefix);
+ CU_ASSERT(rc != 0);
+
+ rc = spdk_parse_capacity("darsto", &cap, &has_prefix);
+ CU_ASSERT(rc != 0);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("string", NULL, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "test_parse_ip_addr", test_parse_ip_addr) == NULL ||
+ CU_add_test(suite, "test_str_chomp", test_str_chomp) == NULL ||
+ CU_add_test(suite, "test_parse_capacity", test_parse_capacity) == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+
+ CU_basic_run_tests();
+
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}
diff --git a/src/spdk/test/unit/lib/vhost/Makefile b/src/spdk/test/unit/lib/vhost/Makefile
new file mode 100644
index 00000000..0f569f6d
--- /dev/null
+++ b/src/spdk/test/unit/lib/vhost/Makefile
@@ -0,0 +1,44 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+
+DIRS-y = vhost.c
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+include $(SPDK_ROOT_DIR)/mk/spdk.subdirs.mk
diff --git a/src/spdk/test/unit/lib/vhost/test_vhost.c b/src/spdk/test/unit/lib/vhost/test_vhost.c
new file mode 100644
index 00000000..437e1230
--- /dev/null
+++ b/src/spdk/test/unit/lib/vhost/test_vhost.c
@@ -0,0 +1,121 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "CUnit/Basic.h"
+#include "spdk_cunit.h"
+#include "spdk_internal/mock.h"
+#include "spdk/thread.h"
+
+#include "unit/lib/json_mock.c"
+
+struct spdk_conf_section {
+ struct spdk_conf_section *next;
+ char *name;
+ int num;
+ struct spdk_conf_item *item;
+};
+
+DEFINE_STUB(spdk_vhost_vq_get_desc, int, (struct spdk_vhost_dev *vdev,
+ struct spdk_vhost_virtqueue *vq, uint16_t req_idx, struct vring_desc **desc,
+ struct vring_desc **desc_table, uint32_t *desc_table_size), 0);
+DEFINE_STUB(spdk_vhost_vring_desc_is_wr, bool, (struct vring_desc *cur_desc), false);
+DEFINE_STUB(spdk_vhost_vring_desc_to_iov, int, (struct spdk_vhost_dev *vdev, struct iovec *iov,
+ uint16_t *iov_index, const struct vring_desc *desc), 0);
+DEFINE_STUB_V(spdk_vhost_vq_used_ring_enqueue, (struct spdk_vhost_dev *vdev,
+ struct spdk_vhost_virtqueue *vq, uint16_t id, uint32_t len));
+DEFINE_STUB(spdk_vhost_vring_desc_get_next, int, (struct vring_desc **desc,
+ struct vring_desc *desc_table, uint32_t desc_table_size), 0);
+DEFINE_STUB(spdk_vhost_vq_avail_ring_get, uint16_t, (struct spdk_vhost_virtqueue *vq,
+ uint16_t *reqs, uint16_t reqs_len), 0);
+DEFINE_STUB(spdk_vhost_vq_used_signal, int, (struct spdk_vhost_dev *vdev,
+ struct spdk_vhost_virtqueue *virtqueue), 0);
+DEFINE_STUB_V(spdk_vhost_dev_used_signal, (struct spdk_vhost_dev *vdev));
+DEFINE_STUB_V(spdk_vhost_dev_mem_register, (struct spdk_vhost_dev *vdev));
+DEFINE_STUB_P(spdk_vhost_dev_find, struct spdk_vhost_dev, (const char *ctrlr_name), {0});
+DEFINE_STUB_P(spdk_conf_first_section, struct spdk_conf_section, (struct spdk_conf *cp), {0});
+DEFINE_STUB(spdk_conf_section_match_prefix, bool, (const struct spdk_conf_section *sp,
+ const char *name_prefix), false);
+DEFINE_STUB_P(spdk_conf_next_section, struct spdk_conf_section, (struct spdk_conf_section *sp), {0});
+DEFINE_STUB_P(spdk_conf_section_get_name, const char, (const struct spdk_conf_section *sp), {0});
+DEFINE_STUB(spdk_conf_section_get_boolval, bool, (struct spdk_conf_section *sp, const char *key,
+ bool default_val), false);
+DEFINE_STUB_P(spdk_conf_section_get_nmval, char, (struct spdk_conf_section *sp, const char *key,
+ int idx1, int idx2), {0});
+DEFINE_STUB_V(spdk_vhost_dev_mem_unregister, (struct spdk_vhost_dev *vdev));
+DEFINE_STUB(spdk_vhost_event_send, int, (struct spdk_vhost_dev *vdev, spdk_vhost_event_fn cb_fn,
+ void *arg, unsigned timeout_sec, const char *errmsg), 0);
+DEFINE_STUB(spdk_env_get_socket_id, uint32_t, (uint32_t core), 0);
+DEFINE_STUB_V(spdk_vhost_dev_backend_event_done, (void *event_ctx, int response));
+DEFINE_STUB_V(spdk_vhost_lock, (void));
+DEFINE_STUB_V(spdk_vhost_unlock, (void));
+DEFINE_STUB(spdk_env_get_current_core, uint32_t, (void), 0);
+DEFINE_STUB_V(spdk_vhost_call_external_event, (const char *ctrlr_name, spdk_vhost_event_fn fn,
+ void *arg));
+DEFINE_STUB(spdk_vhost_vring_desc_has_next, bool, (struct vring_desc *cur_desc), false);
+DEFINE_STUB_VP(spdk_vhost_gpa_to_vva, (struct spdk_vhost_dev *vdev, uint64_t addr, uint64_t len),
+{0});
+DEFINE_STUB(spdk_scsi_dev_get_id, int, (const struct spdk_scsi_dev *dev), {0});
+
+/* This sets spdk_vhost_dev_unregister to either to fail or success */
+DEFINE_STUB(spdk_vhost_dev_unregister_fail, bool, (void), false);
+/* This sets spdk_vhost_dev_register to either to fail or success */
+DEFINE_STUB(spdk_vhost_dev_register_fail, bool, (void), false);
+
+static struct spdk_vhost_dev *g_spdk_vhost_device;
+int
+spdk_vhost_dev_register(struct spdk_vhost_dev *vdev, const char *name, const char *mask_str,
+ const struct spdk_vhost_dev_backend *backend)
+{
+ if (spdk_vhost_dev_register_fail()) {
+ return -1;
+ }
+
+ vdev->backend = backend;
+ g_spdk_vhost_device = vdev;
+ vdev->registered = true;
+ return 0;
+}
+
+int
+spdk_vhost_dev_unregister(struct spdk_vhost_dev *vdev)
+{
+ if (spdk_vhost_dev_unregister_fail()) {
+ return -1;
+ }
+
+ free(vdev->name);
+ g_spdk_vhost_device = NULL;
+ return 0;
+}
diff --git a/src/spdk/test/unit/lib/vhost/vhost.c/.gitignore b/src/spdk/test/unit/lib/vhost/vhost.c/.gitignore
new file mode 100644
index 00000000..16cead8f
--- /dev/null
+++ b/src/spdk/test/unit/lib/vhost/vhost.c/.gitignore
@@ -0,0 +1 @@
+vhost_ut
diff --git a/src/spdk/test/unit/lib/vhost/vhost.c/Makefile b/src/spdk/test/unit/lib/vhost/vhost.c/Makefile
new file mode 100644
index 00000000..3c30f5a8
--- /dev/null
+++ b/src/spdk/test/unit/lib/vhost/vhost.c/Makefile
@@ -0,0 +1,42 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) Intel Corporation.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+SPDK_ROOT_DIR := $(abspath $(CURDIR)/../../../../..)
+include $(SPDK_ROOT_DIR)/mk/spdk.common.mk
+include $(SPDK_ROOT_DIR)/mk/spdk.app.mk
+
+CFLAGS += -I$(SPDK_ROOT_DIR)/lib/vhost/rte_vhost
+CFLAGS += $(ENV_CFLAGS)
+TEST_FILE = vhost_ut.c
+
+include $(SPDK_ROOT_DIR)/mk/spdk.unittest.mk
diff --git a/src/spdk/test/unit/lib/vhost/vhost.c/vhost_ut.c b/src/spdk/test/unit/lib/vhost/vhost.c/vhost_ut.c
new file mode 100644
index 00000000..49e879ed
--- /dev/null
+++ b/src/spdk/test/unit/lib/vhost/vhost.c/vhost_ut.c
@@ -0,0 +1,364 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "spdk/stdinc.h"
+
+#include "CUnit/Basic.h"
+#include "spdk_cunit.h"
+#include "spdk/thread.h"
+#include "spdk_internal/mock.h"
+#include "common/lib/test_env.c"
+#include "unit/lib/json_mock.c"
+
+#include "vhost/vhost.c"
+
+DEFINE_STUB(rte_vhost_driver_unregister, int, (const char *path), 0);
+DEFINE_STUB(spdk_event_allocate, struct spdk_event *,
+ (uint32_t lcore, spdk_event_fn fn, void *arg1, void *arg2), NULL);
+DEFINE_STUB(spdk_mem_register, int, (void *vaddr, size_t len), 0);
+DEFINE_STUB(spdk_mem_unregister, int, (void *vaddr, size_t len), 0);
+
+static struct spdk_cpuset *g_app_core_mask;
+struct spdk_cpuset *spdk_app_get_core_mask(void)
+{
+ if (g_app_core_mask == NULL) {
+ g_app_core_mask = spdk_cpuset_alloc();
+ spdk_cpuset_set_cpu(g_app_core_mask, 0, true);
+ }
+ return g_app_core_mask;
+}
+
+int
+spdk_app_parse_core_mask(const char *mask, struct spdk_cpuset *cpumask)
+{
+ int ret;
+ struct spdk_cpuset *validmask;
+
+ ret = spdk_cpuset_parse(cpumask, mask);
+ if (ret < 0) {
+ return ret;
+ }
+
+ validmask = spdk_app_get_core_mask();
+ spdk_cpuset_and(cpumask, validmask);
+
+ return 0;
+}
+
+DEFINE_STUB(spdk_env_get_first_core, uint32_t, (void), 0);
+DEFINE_STUB(spdk_env_get_next_core, uint32_t, (uint32_t prev_core), 0);
+DEFINE_STUB(spdk_env_get_last_core, uint32_t, (void), 0);
+DEFINE_STUB_V(spdk_app_stop, (int rc));
+DEFINE_STUB_V(spdk_event_call, (struct spdk_event *event));
+DEFINE_STUB(spdk_poller_register, struct spdk_poller *, (spdk_poller_fn fn, void *arg,
+ uint64_t period_microseconds), NULL);
+DEFINE_STUB_V(spdk_poller_unregister, (struct spdk_poller **ppoller));
+DEFINE_STUB(spdk_iommu_mem_unregister, int, (uint64_t addr, uint64_t len), 0);
+DEFINE_STUB(rte_vhost_get_mem_table, int, (int vid, struct rte_vhost_memory **mem), 0);
+DEFINE_STUB(rte_vhost_get_negotiated_features, int, (int vid, uint64_t *features), 0);
+DEFINE_STUB(rte_vhost_get_vhost_vring, int,
+ (int vid, uint16_t vring_idx, struct rte_vhost_vring *vring), 0);
+DEFINE_STUB(rte_vhost_enable_guest_notification, int,
+ (int vid, uint16_t queue_id, int enable), 0);
+DEFINE_STUB(rte_vhost_get_ifname, int, (int vid, char *buf, size_t len), 0);
+DEFINE_STUB(rte_vhost_get_vring_num, uint16_t, (int vid), 0);
+DEFINE_STUB(rte_vhost_driver_start, int, (const char *name), 0);
+DEFINE_STUB(rte_vhost_driver_callback_register, int,
+ (const char *path, struct vhost_device_ops const *const ops), 0);
+DEFINE_STUB(rte_vhost_driver_disable_features, int, (const char *path, uint64_t features), 0);
+DEFINE_STUB(rte_vhost_driver_set_features, int, (const char *path, uint64_t features), 0);
+DEFINE_STUB(rte_vhost_driver_register, int, (const char *path, uint64_t flags), 0);
+DEFINE_STUB_V(rte_vhost_log_used_vring, (int vid, uint16_t vring_idx, uint64_t offset,
+ uint64_t len));
+DEFINE_STUB_V(rte_vhost_log_write, (int vid, uint64_t addr, uint64_t len));
+DEFINE_STUB(spdk_vhost_scsi_controller_construct, int, (void), 0);
+DEFINE_STUB(spdk_vhost_blk_controller_construct, int, (void), 0);
+DEFINE_STUB(spdk_vhost_nvme_admin_passthrough, int, (int vid, void *cmd, void *cqe, void *buf), 0);
+DEFINE_STUB(spdk_vhost_nvme_set_cq_call, int, (int vid, uint16_t qid, int fd), 0);
+DEFINE_STUB(spdk_vhost_nvme_get_cap, int, (int vid, uint64_t *cap), 0);
+DEFINE_STUB(spdk_vhost_nvme_controller_construct, int, (void), 0);
+DEFINE_STUB(rte_vhost_set_vhost_vring_last_idx, int,
+ (int vid, uint16_t vring_idx, uint16_t last_avail_idx, uint16_t last_used_idx), 0);
+DEFINE_STUB(spdk_env_get_current_core, uint32_t, (void), 0);
+
+void *
+spdk_call_unaffinitized(void *cb(void *arg), void *arg)
+{
+ return cb(arg);
+}
+
+static struct spdk_vhost_dev_backend g_vdev_backend;
+
+static int
+test_setup(void)
+{
+ return 0;
+}
+
+static int
+alloc_vdev(struct spdk_vhost_dev **vdev_p, const char *name, const char *cpumask)
+{
+ struct spdk_vhost_dev *vdev = NULL;
+ int rc;
+
+ /* spdk_vhost_dev must be allocated on a cache line boundary. */
+ rc = posix_memalign((void **)&vdev, 64, sizeof(*vdev));
+ CU_ASSERT(rc == 0);
+ SPDK_CU_ASSERT_FATAL(vdev != NULL);
+ memset(vdev, 0, sizeof(*vdev));
+ rc = spdk_vhost_dev_register(vdev, name, cpumask, &g_vdev_backend);
+ if (rc == 0) {
+ *vdev_p = vdev;
+ } else {
+ free(vdev);
+ *vdev_p = NULL;
+ }
+
+ return rc;
+}
+
+static void
+start_vdev(struct spdk_vhost_dev *vdev)
+{
+ vdev->vid = 0;
+ vdev->lcore = 0;
+ vdev->mem = calloc(1, sizeof(*vdev->mem) + 2 * sizeof(struct rte_vhost_mem_region));
+ SPDK_CU_ASSERT_FATAL(vdev->mem != NULL);
+ vdev->mem->nregions = 2;
+ vdev->mem->regions[0].guest_phys_addr = 0;
+ vdev->mem->regions[0].size = 0x400000; /* 4 MB */
+ vdev->mem->regions[0].host_user_addr = 0x1000000;
+ vdev->mem->regions[1].guest_phys_addr = 0x400000;
+ vdev->mem->regions[1].size = 0x400000; /* 4 MB */
+ vdev->mem->regions[1].host_user_addr = 0x2000000;
+}
+
+static void
+stop_vdev(struct spdk_vhost_dev *vdev)
+{
+ free(vdev->mem);
+ vdev->mem = NULL;
+ vdev->vid = -1;
+}
+
+static void
+cleanup_vdev(struct spdk_vhost_dev *vdev)
+{
+ stop_vdev(vdev);
+ spdk_vhost_dev_unregister(vdev);
+ free(vdev);
+}
+
+static void
+desc_to_iov_test(void)
+{
+ struct spdk_vhost_dev *vdev;
+ struct iovec iov[SPDK_VHOST_IOVS_MAX];
+ uint16_t iov_index;
+ struct vring_desc desc;
+ int rc;
+
+ rc = alloc_vdev(&vdev, "vdev_name_0", "0x1");
+ SPDK_CU_ASSERT_FATAL(rc == 0 && vdev);
+ start_vdev(vdev);
+
+ /* Test simple case where iov falls fully within a 2MB page. */
+ desc.addr = 0x110000;
+ desc.len = 0x1000;
+ iov_index = 0;
+ rc = spdk_vhost_vring_desc_to_iov(vdev, iov, &iov_index, &desc);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(iov_index == 1);
+ CU_ASSERT(iov[0].iov_base == (void *)0x1110000);
+ CU_ASSERT(iov[0].iov_len == 0x1000);
+ /*
+ * Always memset the iov to ensure each test validates data written by its call
+ * to the function under test.
+ */
+ memset(iov, 0, sizeof(iov));
+
+ /* Same test, but ensure it respects the non-zero starting iov_index. */
+ iov_index = SPDK_VHOST_IOVS_MAX - 1;
+ rc = spdk_vhost_vring_desc_to_iov(vdev, iov, &iov_index, &desc);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(iov_index == SPDK_VHOST_IOVS_MAX);
+ CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_base == (void *)0x1110000);
+ CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_len == 0x1000);
+ memset(iov, 0, sizeof(iov));
+
+ /* Test for failure if iov_index already equals SPDK_VHOST_IOVS_MAX. */
+ iov_index = SPDK_VHOST_IOVS_MAX;
+ rc = spdk_vhost_vring_desc_to_iov(vdev, iov, &iov_index, &desc);
+ CU_ASSERT(rc != 0);
+ memset(iov, 0, sizeof(iov));
+
+ /* Test case where iov spans a 2MB boundary, but does not span a vhost memory region. */
+ desc.addr = 0x1F0000;
+ desc.len = 0x20000;
+ iov_index = 0;
+ rc = spdk_vhost_vring_desc_to_iov(vdev, iov, &iov_index, &desc);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(iov_index == 1);
+ CU_ASSERT(iov[0].iov_base == (void *)0x11F0000);
+ CU_ASSERT(iov[0].iov_len == 0x20000);
+ memset(iov, 0, sizeof(iov));
+
+ /* Same test, but ensure it respects the non-zero starting iov_index. */
+ iov_index = SPDK_VHOST_IOVS_MAX - 1;
+ rc = spdk_vhost_vring_desc_to_iov(vdev, iov, &iov_index, &desc);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(iov_index == SPDK_VHOST_IOVS_MAX);
+ CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_base == (void *)0x11F0000);
+ CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_len == 0x20000);
+ memset(iov, 0, sizeof(iov));
+
+ /* Test case where iov spans a vhost memory region. */
+ desc.addr = 0x3F0000;
+ desc.len = 0x20000;
+ iov_index = 0;
+ rc = spdk_vhost_vring_desc_to_iov(vdev, iov, &iov_index, &desc);
+ CU_ASSERT(rc == 0);
+ CU_ASSERT(iov_index == 2);
+ CU_ASSERT(iov[0].iov_base == (void *)0x13F0000);
+ CU_ASSERT(iov[0].iov_len == 0x10000);
+ CU_ASSERT(iov[1].iov_base == (void *)0x2000000);
+ CU_ASSERT(iov[1].iov_len == 0x10000);
+ memset(iov, 0, sizeof(iov));
+
+ cleanup_vdev(vdev);
+
+ CU_ASSERT(true);
+}
+
+static void
+create_controller_test(void)
+{
+ struct spdk_vhost_dev *vdev, *vdev2;
+ int ret;
+ char long_name[PATH_MAX];
+
+ /* NOTE: spdk_app_get_core_mask stub always sets coremask 0x01 */
+
+ /* Create device with no name */
+ ret = alloc_vdev(&vdev, NULL, "0x1");
+ CU_ASSERT(ret != 0);
+
+ /* Create device with incorrect cpumask */
+ ret = alloc_vdev(&vdev, "vdev_name_0", "0x2");
+ CU_ASSERT(ret != 0);
+
+ /* Create device with too long name and path */
+ memset(long_name, 'x', sizeof(long_name));
+ long_name[PATH_MAX - 1] = 0;
+ snprintf(dev_dirname, sizeof(dev_dirname), "some_path/");
+ ret = alloc_vdev(&vdev, long_name, "0x1");
+ CU_ASSERT(ret != 0);
+ dev_dirname[0] = 0;
+
+ /* Create device when device name is already taken */
+ ret = alloc_vdev(&vdev, "vdev_name_0", "0x1");
+ SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);
+ ret = alloc_vdev(&vdev2, "vdev_name_0", "0x1");
+ CU_ASSERT(ret != 0);
+ cleanup_vdev(vdev);
+}
+
+static void
+dev_find_by_vid_test(void)
+{
+ struct spdk_vhost_dev *vdev, *tmp;
+ int rc;
+
+ rc = alloc_vdev(&vdev, "vdev_name_0", "0x1");
+ SPDK_CU_ASSERT_FATAL(rc == 0 && vdev);
+
+ tmp = spdk_vhost_dev_find_by_vid(vdev->vid);
+ CU_ASSERT(tmp == vdev);
+
+ /* Search for a device with incorrect vid */
+ tmp = spdk_vhost_dev_find_by_vid(vdev->vid + 0xFF);
+ CU_ASSERT(tmp == NULL);
+
+ cleanup_vdev(vdev);
+}
+
+static void
+remove_controller_test(void)
+{
+ struct spdk_vhost_dev *vdev;
+ int ret;
+
+ ret = alloc_vdev(&vdev, "vdev_name_0", "0x1");
+ SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);
+
+ /* Remove device when controller is in use */
+ start_vdev(vdev);
+ ret = spdk_vhost_dev_unregister(vdev);
+ CU_ASSERT(ret != 0);
+
+ cleanup_vdev(vdev);
+}
+
+int
+main(int argc, char **argv)
+{
+ CU_pSuite suite = NULL;
+ unsigned int num_failures;
+
+ if (CU_initialize_registry() != CUE_SUCCESS) {
+ return CU_get_error();
+ }
+
+ suite = CU_add_suite("vhost_suite", test_setup, NULL);
+ if (suite == NULL) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ if (
+ CU_add_test(suite, "desc_to_iov", desc_to_iov_test) == NULL ||
+ CU_add_test(suite, "create_controller", create_controller_test) == NULL ||
+ CU_add_test(suite, "dev_find_by_vid", dev_find_by_vid_test) == NULL ||
+ CU_add_test(suite, "remove_controller", remove_controller_test) == NULL
+ ) {
+ CU_cleanup_registry();
+ return CU_get_error();
+ }
+
+ CU_basic_set_mode(CU_BRM_VERBOSE);
+ CU_basic_run_tests();
+ num_failures = CU_get_number_of_failures();
+ CU_cleanup_registry();
+
+ return num_failures;
+}